1 /* 2 * Copyright (c) 2004 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 * 32 * $FreeBSD: src/sys/dev/vge/if_vge.c,v 1.24 2006/02/14 12:44:56 glebius Exp $ 33 */ 34 35 /* 36 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 37 * 38 * Written by Bill Paul <wpaul@windriver.com> 39 * Senior Networking Software Engineer 40 * Wind River Systems 41 */ 42 43 /* 44 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that 45 * combines a tri-speed ethernet MAC and PHY, with the following 46 * features: 47 * 48 * o Jumbo frame support up to 16K 49 * o Transmit and receive flow control 50 * o IPv4 checksum offload 51 * o VLAN tag insertion and stripping 52 * o TCP large send 53 * o 64-bit multicast hash table filter 54 * o 64 entry CAM filter 55 * o 16K RX FIFO and 48K TX FIFO memory 56 * o Interrupt moderation 57 * 58 * The VT6122 supports up to four transmit DMA queues. The descriptors 59 * in the transmit ring can address up to 7 data fragments; frames which 60 * span more than 7 data buffers must be coalesced, but in general the 61 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 62 * long. The receive descriptors address only a single buffer. 63 * 64 * There are two peculiar design issues with the VT6122. One is that 65 * receive data buffers must be aligned on a 32-bit boundary. This is 66 * not a problem where the VT6122 is used as a LOM device in x86-based 67 * systems, but on architectures that generate unaligned access traps, we 68 * have to do some copying. 69 * 70 * The other issue has to do with the way 64-bit addresses are handled. 71 * The DMA descriptors only allow you to specify 48 bits of addressing 72 * information. The remaining 16 bits are specified using one of the 73 * I/O registers.
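* (As a sketch, using the macro names from if_vgevar.h rather than chip
* documentation: each descriptor below stores the low 32 address bits via
* VGE_ADDR_LO() in vge_addrlo and bits 32-47 via VGE_ADDR_HI() in the 16-bit
* vge_addrhi field; bits 48-63 come from that single shared I/O register,
* so every buffer must carry the same upper 16 bits.)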
If you only have a 32-bit system, then this isn't 74 * an issue, but if you have a 64-bit system and more than 4GB of 75 * memory, you must make sure your network data buffers reside 76 * in the same 48-bit 'segment.' 77 * 78 * Special thanks to Ryan Fu at VIA Networking for providing documentation 79 * and sample NICs for testing. 80 */ 81 82 #include "opt_ifpoll.h" 83 84 #include <sys/param.h> 85 #include <sys/endian.h> 86 #include <sys/systm.h> 87 #include <sys/sockio.h> 88 #include <sys/mbuf.h> 89 #include <sys/malloc.h> 90 #include <sys/module.h> 91 #include <sys/kernel.h> 92 #include <sys/socket.h> 93 #include <sys/serialize.h> 94 #include <sys/proc.h> 95 #include <sys/bus.h> 96 #include <sys/rman.h> 97 #include <sys/interrupt.h> 98 99 #include <net/if.h> 100 #include <net/if_arp.h> 101 #include <net/ethernet.h> 102 #include <net/if_dl.h> 103 #include <net/if_media.h> 104 #include <net/if_poll.h> 105 #include <net/ifq_var.h> 106 #include <net/if_types.h> 107 #include <net/vlan/if_vlan_var.h> 108 #include <net/vlan/if_vlan_ether.h> 109 110 #include <net/bpf.h> 111 112 #include <dev/netif/mii_layer/mii.h> 113 #include <dev/netif/mii_layer/miivar.h> 114 115 #include <bus/pci/pcireg.h> 116 #include <bus/pci/pcivar.h> 117 #include "pcidevs.h" 118 119 #include "miibus_if.h" 120 121 #include <dev/netif/vge/if_vgereg.h> 122 #include <dev/netif/vge/if_vgevar.h> 123 124 #define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 125 126 /* 127 * Various supported device vendors/types and their names. 128 */ 129 static const struct vge_type vge_devs[] = { 130 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612X, 131 "VIA Networking Gigabit Ethernet" }, 132 { 0, 0, NULL } 133 }; 134 135 static int vge_probe (device_t); 136 static int vge_attach (device_t); 137 static int vge_detach (device_t); 138 139 static int vge_encap (struct vge_softc *, struct mbuf *, int); 140 141 static void vge_dma_map_addr (void *, bus_dma_segment_t *, int, int); 142 static void vge_dma_map_rx_desc (void *, bus_dma_segment_t *, int, 143 bus_size_t, int); 144 static void vge_dma_map_tx_desc (void *, bus_dma_segment_t *, int, 145 bus_size_t, int); 146 static int vge_dma_alloc (device_t); 147 static void vge_dma_free (struct vge_softc *); 148 static int vge_newbuf (struct vge_softc *, int, struct mbuf *); 149 static int vge_rx_list_init (struct vge_softc *); 150 static int vge_tx_list_init (struct vge_softc *); 151 #ifdef VGE_FIXUP_RX 152 static __inline void vge_fixup_rx 153 (struct mbuf *); 154 #endif 155 static void vge_rxeof (struct vge_softc *, int); 156 static void vge_txeof (struct vge_softc *); 157 static void vge_intr (void *); 158 static void vge_tick (struct vge_softc *); 159 static void vge_start (struct ifnet *, struct ifaltq_subque *); 160 static int vge_ioctl (struct ifnet *, u_long, caddr_t, 161 struct ucred *); 162 static void vge_init (void *); 163 static void vge_stop (struct vge_softc *); 164 static void vge_watchdog (struct ifnet *); 165 static int vge_suspend (device_t); 166 static int vge_resume (device_t); 167 static void vge_shutdown (device_t); 168 static int vge_ifmedia_upd (struct ifnet *); 169 static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *); 170 171 #ifdef VGE_EEPROM 172 static void vge_eeprom_getword (struct vge_softc *, int, uint16_t *); 173 #endif 174 static void vge_read_eeprom (struct vge_softc *, uint8_t *, int, int, int); 175 176 static void vge_miipoll_start (struct vge_softc *); 177 static void vge_miipoll_stop (struct vge_softc *); 178 static int
vge_miibus_readreg (device_t, int, int); 179 static int vge_miibus_writereg (device_t, int, int, int); 180 static void vge_miibus_statchg (device_t); 181 182 static void vge_cam_clear (struct vge_softc *); 183 static int vge_cam_set (struct vge_softc *, uint8_t *); 184 static void vge_setmulti (struct vge_softc *); 185 static void vge_reset (struct vge_softc *); 186 187 #ifdef IFPOLL_ENABLE 188 static void vge_npoll(struct ifnet *, struct ifpoll_info *); 189 static void vge_npoll_compat(struct ifnet *, void *, int); 190 static void vge_disable_intr(struct vge_softc *); 191 #endif 192 static void vge_enable_intr(struct vge_softc *, uint32_t); 193 194 #define VGE_PCI_LOIO 0x10 195 #define VGE_PCI_LOMEM 0x14 196 197 static device_method_t vge_methods[] = { 198 /* Device interface */ 199 DEVMETHOD(device_probe, vge_probe), 200 DEVMETHOD(device_attach, vge_attach), 201 DEVMETHOD(device_detach, vge_detach), 202 DEVMETHOD(device_suspend, vge_suspend), 203 DEVMETHOD(device_resume, vge_resume), 204 DEVMETHOD(device_shutdown, vge_shutdown), 205 206 /* bus interface */ 207 DEVMETHOD(bus_print_child, bus_generic_print_child), 208 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 209 210 /* MII interface */ 211 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 212 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 213 DEVMETHOD(miibus_statchg, vge_miibus_statchg), 214 215 DEVMETHOD_END 216 }; 217 218 static driver_t vge_driver = { 219 "vge", 220 vge_methods, 221 sizeof(struct vge_softc) 222 }; 223 224 static devclass_t vge_devclass; 225 226 DECLARE_DUMMY_MODULE(if_vge); 227 MODULE_DEPEND(if_vge, miibus, 1, 1, 1); 228 DRIVER_MODULE(if_vge, pci, vge_driver, vge_devclass, NULL, NULL); 229 DRIVER_MODULE(if_vge, cardbus, vge_driver, vge_devclass, NULL, NULL); 230 DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, NULL, NULL); 231 232 #ifdef VGE_EEPROM 233 /* 234 * Read a word of data stored in the EEPROM at address 'addr.' 235 */ 236 static void 237 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 238 { 239 uint16_t word = 0; 240 int i; 241 242 /* 243 * Enter EEPROM embedded programming mode. In order to 244 * access the EEPROM at all, we first have to set the 245 * EELOAD bit in the CHIPCFG2 register. 246 */ 247 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 248 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 249 250 /* Select the address of the word we want to read */ 251 CSR_WRITE_1(sc, VGE_EEADDR, addr); 252 253 /* Issue read command */ 254 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 255 256 /* Wait for the done bit to be set. */ 257 for (i = 0; i < VGE_TIMEOUT; i++) { 258 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 259 break; 260 } 261 if (i == VGE_TIMEOUT) { 262 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 263 *dest = 0; 264 return; 265 } 266 267 /* Read the result */ 268 word = CSR_READ_2(sc, VGE_EERDDAT); 269 270 /* Turn off EEPROM access mode. */ 271 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 272 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 273 274 *dest = word; 275 } 276 #endif 277 278 /* 279 * Read a sequence of words from the EEPROM.
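* For example, vge_attach() below fetches the 6-byte station address as
* three 16-bit words: vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0) reads
* words 0-2 starting at VGE_EE_EADDR into eaddr without byte swapping.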
280 */ 281 static void 282 vge_read_eeprom(struct vge_softc *sc, uint8_t *dest, int off, int cnt, int swap) 283 { 284 int i; 285 #ifdef VGE_EEPROM 286 uint16_t word = 0, *ptr; 287 288 for (i = 0; i < cnt; i++) { 289 vge_eeprom_getword(sc, off + i, &word); 290 ptr = (uint16_t *)(dest + (i * 2)); 291 if (swap) 292 *ptr = ntohs(word); 293 else 294 *ptr = word; 295 } 296 #else 297 for (i = 0; i < ETHER_ADDR_LEN; i++) 298 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 299 #endif 300 } 301 302 static void 303 vge_miipoll_stop(struct vge_softc *sc) 304 { 305 int i; 306 307 CSR_WRITE_1(sc, VGE_MIICMD, 0); 308 309 for (i = 0; i < VGE_TIMEOUT; i++) { 310 DELAY(1); 311 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 312 break; 313 } 314 if (i == VGE_TIMEOUT) 315 if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n"); 316 } 317 318 static void 319 vge_miipoll_start(struct vge_softc *sc) 320 { 321 int i; 322 323 /* First, make sure we're idle. */ 324 CSR_WRITE_1(sc, VGE_MIICMD, 0); 325 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 326 327 for (i = 0; i < VGE_TIMEOUT; i++) { 328 DELAY(1); 329 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 330 break; 331 } 332 if (i == VGE_TIMEOUT) { 333 if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n"); 334 return; 335 } 336 337 /* Now enable auto poll mode. */ 338 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 339 340 /* And make sure it started. */ 341 for (i = 0; i < VGE_TIMEOUT; i++) { 342 DELAY(1); 343 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 344 break; 345 } 346 if (i == VGE_TIMEOUT) 347 if_printf(&sc->arpcom.ac_if, "failed to start MII autopoll\n"); 348 } 349 350 static int 351 vge_miibus_readreg(device_t dev, int phy, int reg) 352 { 353 struct vge_softc *sc; 354 int i; 355 uint16_t rval = 0; 356 357 sc = device_get_softc(dev); 358 359 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) 360 return(0); 361 362 vge_miipoll_stop(sc); 363 364 /* Specify the register we want to read. */ 365 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 366 367 /* Issue read command. */ 368 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 369 370 /* Wait for the read command bit to self-clear. */ 371 for (i = 0; i < VGE_TIMEOUT; i++) { 372 DELAY(1); 373 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) 374 break; 375 } 376 if (i == VGE_TIMEOUT) 377 if_printf(&sc->arpcom.ac_if, "MII read timed out\n"); 378 else 379 rval = CSR_READ_2(sc, VGE_MIIDATA); 380 381 vge_miipoll_start(sc); 382 383 return (rval); 384 } 385 386 static int 387 vge_miibus_writereg(device_t dev, int phy, int reg, int data) 388 { 389 struct vge_softc *sc; 390 int i, rval = 0; 391 392 sc = device_get_softc(dev); 393 394 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) 395 return(0); 396 397 vge_miipoll_stop(sc); 398 399 /* Specify the register we want to write. */ 400 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 401 402 /* Specify the data we want to write. */ 403 CSR_WRITE_2(sc, VGE_MIIDATA, data); 404 405 /* Issue write command. */ 406 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); 407 408 /* Wait for the write command bit to self-clear. */ 409 for (i = 0; i < VGE_TIMEOUT; i++) { 410 DELAY(1); 411 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) 412 break; 413 } 414 if (i == VGE_TIMEOUT) { 415 if_printf(&sc->arpcom.ac_if, "MII write timed out\n"); 416 rval = EIO; 417 } 418 419 vge_miipoll_start(sc); 420 421 return (rval); 422 } 423 424 static void 425 vge_cam_clear(struct vge_softc *sc) 426 { 427 int i; 428 429 /* 430 * Turn off all the mask bits. 
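* The mask lives in the eight bytes at VGE_CAM0: filter entry i is enabled
* by bit (i & 7) of mask byte i/8 (see vge_cam_set() below), so zeroing
* all eight bytes disables all 64 entries at once.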
This tells the chip 431 * that none of the entries in the CAM filter are valid. 432 * The desired entries will be enabled as we fill the filter in. 433 */ 434 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 435 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 436 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); 437 for (i = 0; i < 8; i++) 438 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 439 440 /* Clear the VLAN filter too. */ 441 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); 442 for (i = 0; i < 8; i++) 443 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 444 445 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 446 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 447 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 448 449 sc->vge_camidx = 0; 450 } 451 452 static int 453 vge_cam_set(struct vge_softc *sc, uint8_t *addr) 454 { 455 int i, error = 0; 456 457 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 458 return(ENOSPC); 459 460 /* Select the CAM data page. */ 461 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 462 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); 463 464 /* Set the filter entry we want to update and enable writing. */ 465 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); 466 467 /* Write the address to the CAM registers */ 468 for (i = 0; i < ETHER_ADDR_LEN; i++) 469 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); 470 471 /* Issue a write command. */ 472 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); 473 474 /* Wait for it to clear. */ 475 for (i = 0; i < VGE_TIMEOUT; i++) { 476 DELAY(1); 477 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) 478 break; 479 } 480 if (i == VGE_TIMEOUT) { 481 if_printf(&sc->arpcom.ac_if, "setting CAM filter failed\n"); 482 error = EIO; 483 goto fail; 484 } 485 486 /* Select the CAM mask page. */ 487 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 488 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 489 490 /* Set the mask bit that enables this filter. */ 491 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 492 1<<(sc->vge_camidx & 7)); 493 494 sc->vge_camidx++; 495 496 fail: 497 /* Turn off access to CAM. */ 498 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 499 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 500 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 501 502 return (error); 503 } 504 505 /* 506 * Program the multicast filter. We use the 64-entry CAM filter 507 * for perfect filtering. If there are more than 64 multicast addresses, 508 * we use the hash filter instead. 509 */ 510 static void 511 vge_setmulti(struct vge_softc *sc) 512 { 513 struct ifnet *ifp = &sc->arpcom.ac_if; 514 int error = 0; 515 struct ifmultiaddr *ifma; 516 uint32_t h, hashes[2] = { 0, 0 }; 517 518 /* First, zot all the multicast entries. */ 519 vge_cam_clear(sc); 520 CSR_WRITE_4(sc, VGE_MAR0, 0); 521 CSR_WRITE_4(sc, VGE_MAR1, 0); 522 523 /* 524 * If the user wants allmulti or promisc mode, enable reception 525 * of all multicast frames. 526 */ 527 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 528 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); 529 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); 530 return; 531 } 532 533 /* Now program new ones */ 534 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 535 if (ifma->ifma_addr->sa_family != AF_LINK) 536 continue; 537 error = vge_cam_set(sc, 538 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 539 if (error) 540 break; 541 } 542 543 /* If there were too many addresses, use the hash filter.
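* Roughly: each address is folded to a 6-bit index h =
* ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26, and bit h of the 64-bit
* MAR0/MAR1 register pair is set; the chip then accepts any multicast
* frame whose address hashes to a set bit, so the filter is imperfect
* but never drops a subscribed group.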
*/ 544 if (error) { 545 vge_cam_clear(sc); 546 547 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 548 if (ifma->ifma_addr->sa_family != AF_LINK) 549 continue; 550 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 551 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 552 if (h < 32) 553 hashes[0] |= (1 << h); 554 else 555 hashes[1] |= (1 << (h - 32)); 556 } 557 558 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 559 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 560 } 561 } 562 563 static void 564 vge_reset(struct vge_softc *sc) 565 { 566 int i; 567 568 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 569 570 for (i = 0; i < VGE_TIMEOUT; i++) { 571 DELAY(5); 572 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 573 break; 574 } 575 576 if (i == VGE_TIMEOUT) { 577 if_printf(&sc->arpcom.ac_if, "soft reset timed out\n"); 578 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 579 DELAY(2000); 580 } 581 582 DELAY(5000); 583 584 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 585 586 for (i = 0; i < VGE_TIMEOUT; i++) { 587 DELAY(5); 588 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 589 break; 590 } 591 if (i == VGE_TIMEOUT) { 592 if_printf(&sc->arpcom.ac_if, "EEPROM reload timed out\n"); 593 return; 594 } 595 596 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 597 } 598 599 /* 600 * Probe for a VIA gigabit chip. Check the PCI vendor and device 601 * IDs against our list and return a device name if we find a match. 602 */ 603 static int 604 vge_probe(device_t dev) 605 { 606 const struct vge_type *t; 607 uint16_t did, vid; 608 609 did = pci_get_device(dev); 610 vid = pci_get_vendor(dev); 611 for (t = vge_devs; t->vge_name != NULL; ++t) { 612 if (vid == t->vge_vid && did == t->vge_did) { 613 device_set_desc(dev, t->vge_name); 614 return 0; 615 } 616 } 617 return (ENXIO); 618 } 619 620 static void 621 vge_dma_map_rx_desc(void *arg, bus_dma_segment_t *segs, int nseg, 622 bus_size_t mapsize, int error) 623 { 624 625 struct vge_dmaload_arg *ctx; 626 struct vge_rx_desc *d = NULL; 627 628 if (error) 629 return; 630 631 ctx = arg; 632 633 /* Signal error to caller if there are too many segments */ 634 if (nseg > ctx->vge_maxsegs) { 635 ctx->vge_maxsegs = 0; 636 return; 637 } 638 639 /* 640 * Map the segment array into descriptors. 641 */ 642 d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx]; 643 644 /* If this descriptor is still owned by the chip, bail. */ 645 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) { 646 if_printf(&ctx->sc->arpcom.ac_if, 647 "tried to map busy descriptor\n"); 648 ctx->vge_maxsegs = 0; 649 return; 650 } 651 652 d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I); 653 d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 654 d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF); 655 d->vge_sts = 0; 656 d->vge_ctl = 0; 657 658 ctx->vge_maxsegs = 1; 659 } 660 661 static void 662 vge_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs, int nseg, 663 bus_size_t mapsize, int error) 664 { 665 struct vge_dmaload_arg *ctx; 666 struct vge_tx_desc *d = NULL; 667 struct vge_tx_frag *f; 668 int i = 0; 669 670 if (error) 671 return; 672 673 ctx = arg; 674 675 /* Signal error to caller if there are too many segments */ 676 if (nseg > ctx->vge_maxsegs) { 677 ctx->vge_maxsegs = 0; 678 return; 679 } 680 681 /* Map the segment array into descriptors. */ 682 d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx]; 683 684 /* If this descriptor is still owned by the chip, bail.
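* (VGE_TDSTS_OWN marks a descriptor as handed to the hardware:
* vge_encap() sets it once a frame is fully mapped, and the chip clears
* it on completion, which is what vge_txeof() polls for.)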
*/ 685 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) { 686 ctx->vge_maxsegs = 0; 687 return; 688 } 689 690 for (i = 0; i < nseg; i++) { 691 f = &d->vge_frag[i]; 692 f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len)); 693 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr)); 694 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF); 695 } 696 697 /* Argh. This chip does not autopad short frames */ 698 if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) { 699 f = &d->vge_frag[i]; 700 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - 701 ctx->vge_m0->m_pkthdr.len)); 702 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 703 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF); 704 ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN; 705 i++; 706 } 707 708 /* 709 * When telling the chip how many segments there are, we 710 * must use nsegs + 1 instead of just nsegs. Darned if I 711 * know why. 712 */ 713 i++; 714 715 d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16; 716 d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM; 717 718 if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN) 719 d->vge_ctl |= VGE_TDCTL_JUMBO; 720 721 ctx->vge_maxsegs = nseg; 722 } 723 724 /* 725 * Map a single buffer address. 726 */ 727 728 static void 729 vge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 730 { 731 if (error) 732 return; 733 734 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 735 *((bus_addr_t *)arg) = segs->ds_addr; 736 } 737 738 static int 739 vge_dma_alloc(device_t dev) 740 { 741 struct vge_softc *sc = device_get_softc(dev); 742 int error, nseg, i, tx_pos = 0, rx_pos = 0; 743 744 /* 745 * Allocate the parent bus DMA tag appropriate for PCI. 746 */ 747 #define VGE_NSEG_NEW 32 748 error = bus_dma_tag_create(NULL, /* parent */ 749 1, 0, /* alignment, boundary */ 750 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 751 BUS_SPACE_MAXADDR, /* highaddr */ 752 NULL, NULL, /* filter, filterarg */ 753 MAXBSIZE, VGE_NSEG_NEW, /* maxsize, nsegments */ 754 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 755 BUS_DMA_ALLOCNOW, /* flags */ 756 &sc->vge_parent_tag); 757 if (error) { 758 device_printf(dev, "can't create parent dma tag\n"); 759 return error; 760 } 761 762 /* 763 * Allocate map for RX mbufs. 764 */ 765 nseg = 32; 766 error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0, 767 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 768 NULL, NULL, 769 MCLBYTES * nseg, nseg, MCLBYTES, 770 BUS_DMA_ALLOCNOW, &sc->vge_ldata.vge_mtag); 771 if (error) { 772 device_printf(dev, "could not allocate mbuf dma tag\n"); 773 return error; 774 } 775 776 /* 777 * Allocate map for TX descriptor list. 778 */ 779 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0, 780 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 781 NULL, NULL, 782 VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, 783 BUS_DMA_ALLOCNOW, 784 &sc->vge_ldata.vge_tx_list_tag); 785 if (error) { 786 device_printf(dev, "could not allocate tx list dma tag\n"); 787 return error; 788 } 789 790 /* Allocate DMA'able memory for the TX ring */ 791 error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag, 792 (void **)&sc->vge_ldata.vge_tx_list, 793 BUS_DMA_WAITOK | BUS_DMA_ZERO, 794 &sc->vge_ldata.vge_tx_list_map); 795 if (error) { 796 device_printf(dev, "could not allocate tx list dma memory\n"); 797 return error; 798 } 799 800 /* Load the map for the TX ring. 
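* (bus_dmamap_load() reports the ring's bus address through the
* vge_dma_map_addr() callback above, which stores the single segment
* address in vge_tx_list_addr; vge_init() later programs that value into
* VGE_TXDESC_ADDR_LO0.)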
*/ 801 error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag, 802 sc->vge_ldata.vge_tx_list_map, 803 sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, 804 vge_dma_map_addr, 805 &sc->vge_ldata.vge_tx_list_addr, 806 BUS_DMA_WAITOK); 807 if (error) { 808 device_printf(dev, "could not load tx list\n"); 809 bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag, 810 sc->vge_ldata.vge_tx_list, 811 sc->vge_ldata.vge_tx_list_map); 812 sc->vge_ldata.vge_tx_list = NULL; 813 return error; 814 } 815 816 /* Create DMA maps for TX buffers */ 817 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 818 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0, 819 &sc->vge_ldata.vge_tx_dmamap[i]); 820 if (error) { 821 device_printf(dev, "can't create DMA map for TX\n"); 822 tx_pos = i; 823 goto map_fail; 824 } 825 } 826 tx_pos = VGE_TX_DESC_CNT; 827 828 /* 829 * Allocate map for RX descriptor list. 830 */ 831 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0, 832 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 833 NULL, NULL, 834 VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, 835 BUS_DMA_ALLOCNOW, 836 &sc->vge_ldata.vge_rx_list_tag); 837 if (error) { 838 device_printf(dev, "could not allocate rx list dma tag\n"); 839 return error; 840 } 841 842 /* Allocate DMA'able memory for the RX ring */ 843 error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag, 844 (void **)&sc->vge_ldata.vge_rx_list, 845 BUS_DMA_WAITOK | BUS_DMA_ZERO, 846 &sc->vge_ldata.vge_rx_list_map); 847 if (error) { 848 device_printf(dev, "could not allocate rx list dma memory\n"); 849 return error; 850 } 851 852 /* Load the map for the RX ring. */ 853 error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag, 854 sc->vge_ldata.vge_rx_list_map, 855 sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, 856 vge_dma_map_addr, 857 &sc->vge_ldata.vge_rx_list_addr, 858 BUS_DMA_WAITOK); 859 if (error) { 860 device_printf(dev, "could not load rx list\n"); 861 bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag, 862 sc->vge_ldata.vge_rx_list, 863 sc->vge_ldata.vge_rx_list_map); 864 sc->vge_ldata.vge_rx_list = NULL; 865 return error; 866 } 867 868 /* Create DMA maps for RX buffers */ 869 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 870 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0, 871 &sc->vge_ldata.vge_rx_dmamap[i]); 872 if (error) { 873 device_printf(dev, "can't create DMA map for RX\n"); 874 rx_pos = i; 875 goto map_fail; 876 } 877 } 878 return (0); 879 880 map_fail: 881 for (i = 0; i < tx_pos; ++i) { 882 error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag, 883 sc->vge_ldata.vge_tx_dmamap[i]); 884 } 885 for (i = 0; i < rx_pos; ++i) { 886 error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag, 887 sc->vge_ldata.vge_rx_dmamap[i]); 888 } 889 bus_dma_tag_destroy(sc->vge_ldata.vge_mtag); 890 sc->vge_ldata.vge_mtag = NULL; 891 892 return error; 893 } 894 895 static void 896 vge_dma_free(struct vge_softc *sc) 897 { 898 /* Unload and free the RX DMA ring memory and map */ 899 if (sc->vge_ldata.vge_rx_list_tag) { 900 bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag, 901 sc->vge_ldata.vge_rx_list_map); 902 bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag, 903 sc->vge_ldata.vge_rx_list, 904 sc->vge_ldata.vge_rx_list_map); 905 } 906 907 if (sc->vge_ldata.vge_rx_list_tag) 908 bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag); 909 910 /* Unload and free the TX DMA ring memory and map */ 911 if (sc->vge_ldata.vge_tx_list_tag) { 912 bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag, 913 sc->vge_ldata.vge_tx_list_map); 914 bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag, 915 sc->vge_ldata.vge_tx_list, 916
sc->vge_ldata.vge_tx_list_map); 917 } 918 919 if (sc->vge_ldata.vge_tx_list_tag) 920 bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag); 921 922 /* Destroy all the RX and TX buffer maps */ 923 if (sc->vge_ldata.vge_mtag) { 924 int i; 925 926 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 927 bus_dmamap_destroy(sc->vge_ldata.vge_mtag, 928 sc->vge_ldata.vge_tx_dmamap[i]); 929 } 930 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 931 bus_dmamap_destroy(sc->vge_ldata.vge_mtag, 932 sc->vge_ldata.vge_rx_dmamap[i]); 933 } 934 bus_dma_tag_destroy(sc->vge_ldata.vge_mtag); 935 } 936 937 if (sc->vge_parent_tag) 938 bus_dma_tag_destroy(sc->vge_parent_tag); 939 } 940 941 /* 942 * Attach the interface. Allocate softc structures, do ifmedia 943 * setup and ethernet/BPF attach. 944 */ 945 static int 946 vge_attach(device_t dev) 947 { 948 uint8_t eaddr[ETHER_ADDR_LEN]; 949 struct vge_softc *sc; 950 struct ifnet *ifp; 951 int error = 0; 952 953 sc = device_get_softc(dev); 954 ifp = &sc->arpcom.ac_if; 955 956 /* Initialize if_xname early, so if_printf() can be used */ 957 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 958 959 /* 960 * Map control/status registers. 961 */ 962 pci_enable_busmaster(dev); 963 964 sc->vge_res_rid = VGE_PCI_LOMEM; 965 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 966 &sc->vge_res_rid, RF_ACTIVE); 967 if (sc->vge_res == NULL) { 968 device_printf(dev, "couldn't map ports/memory\n"); 969 return ENXIO; 970 } 971 972 sc->vge_btag = rman_get_bustag(sc->vge_res); 973 sc->vge_bhandle = rman_get_bushandle(sc->vge_res); 974 975 /* Allocate interrupt */ 976 sc->vge_irq_rid = 0; 977 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->vge_irq_rid, 978 RF_SHAREABLE | RF_ACTIVE); 979 if (sc->vge_irq == NULL) { 980 device_printf(dev, "couldn't map interrupt\n"); 981 error = ENXIO; 982 goto fail; 983 } 984 985 /* Reset the adapter. */ 986 vge_reset(sc); 987 988 /* 989 * Get station address from the EEPROM. 990 */ 991 vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0); 992 993 /* Allocate DMA related stuffs */ 994 error = vge_dma_alloc(dev); 995 if (error) 996 goto fail; 997 998 /* Do MII setup */ 999 error = mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd, 1000 vge_ifmedia_sts); 1001 if (error) { 1002 device_printf(dev, "MII without any phy!\n"); 1003 goto fail; 1004 } 1005 1006 ifp->if_softc = sc; 1007 ifp->if_mtu = ETHERMTU; 1008 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1009 ifp->if_init = vge_init; 1010 ifp->if_start = vge_start; 1011 ifp->if_watchdog = vge_watchdog; 1012 ifp->if_ioctl = vge_ioctl; 1013 #ifdef IFPOLL_ENABLE 1014 ifp->if_npoll = vge_npoll; 1015 #endif 1016 ifp->if_hwassist = VGE_CSUM_FEATURES; 1017 ifp->if_capabilities = IFCAP_VLAN_MTU | 1018 IFCAP_HWCSUM | 1019 IFCAP_VLAN_HWTAGGING; 1020 ifp->if_capenable = ifp->if_capabilities; 1021 ifq_set_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN); 1022 ifq_set_ready(&ifp->if_snd); 1023 1024 /* 1025 * Call MI attach routine. 
1026 */ 1027 ether_ifattach(ifp, eaddr, NULL); 1028 1029 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->vge_irq)); 1030 1031 #ifdef IFPOLL_ENABLE 1032 ifpoll_compat_setup(&sc->vge_npoll, NULL, NULL, device_get_unit(dev), 1033 ifp->if_serializer); 1034 #endif 1035 1036 /* Hook interrupt last to avoid having to lock softc */ 1037 error = bus_setup_intr(dev, sc->vge_irq, INTR_MPSAFE, vge_intr, sc, 1038 &sc->vge_intrhand, ifp->if_serializer); 1039 if (error) { 1040 device_printf(dev, "couldn't set up irq\n"); 1041 ether_ifdetach(ifp); 1042 goto fail; 1043 } 1044 1045 return 0; 1046 fail: 1047 vge_detach(dev); 1048 return error; 1049 } 1050 1051 /* 1052 * Shutdown hardware and free up resources. This can be called any 1053 * time after the mutex has been initialized. It is called in both 1054 * the error case in attach and the normal detach case so it needs 1055 * to be careful about only freeing resources that have actually been 1056 * allocated. 1057 */ 1058 static int 1059 vge_detach(device_t dev) 1060 { 1061 struct vge_softc *sc = device_get_softc(dev); 1062 struct ifnet *ifp = &sc->arpcom.ac_if; 1063 1064 /* These should only be active if attach succeeded */ 1065 if (device_is_attached(dev)) { 1066 lwkt_serialize_enter(ifp->if_serializer); 1067 1068 vge_stop(sc); 1069 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand); 1070 /* 1071 * Force off the IFF_UP flag here, in case someone 1072 * still had a BPF descriptor attached to this 1073 * interface. If they do, ether_ifdetach() will cause 1074 * the BPF code to try and clear the promisc mode 1075 * flag, which will bubble down to vge_ioctl(), 1076 * which will try to call vge_init() again. This will 1077 * turn the NIC back on and restart the MII ticker, 1078 * which will panic the system when the kernel tries 1079 * to invoke the vge_tick() function that isn't there 1080 * anymore. 1081 */ 1082 ifp->if_flags &= ~IFF_UP; 1083 1084 lwkt_serialize_exit(ifp->if_serializer); 1085 1086 ether_ifdetach(ifp); 1087 } 1088 1089 if (sc->vge_miibus) 1090 device_delete_child(dev, sc->vge_miibus); 1091 bus_generic_detach(dev); 1092 1093 if (sc->vge_irq) { 1094 bus_release_resource(dev, SYS_RES_IRQ, sc->vge_irq_rid, 1095 sc->vge_irq); 1096 } 1097 1098 if (sc->vge_res) { 1099 bus_release_resource(dev, SYS_RES_MEMORY, sc->vge_res_rid, 1100 sc->vge_res); 1101 } 1102 1103 vge_dma_free(sc); 1104 return (0); 1105 } 1106 1107 static int 1108 vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m) 1109 { 1110 struct vge_dmaload_arg arg; 1111 struct mbuf *n = NULL; 1112 int i, error; 1113 1114 if (m == NULL) { 1115 n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 1116 if (n == NULL) 1117 return (ENOBUFS); 1118 m = n; 1119 } else { 1120 m->m_data = m->m_ext.ext_buf; 1121 } 1122 1123 1124 #ifdef VGE_FIXUP_RX 1125 /* 1126 * This is part of an evil trick to deal with non-x86 platforms. 1127 * The VIA chip requires RX buffers to be aligned on 32-bit 1128 * boundaries, but that will hose non-x86 machines. To get around 1129 * this, we leave some empty space at the start of each buffer 1130 * and for non-x86 hosts, we copy the buffer back two bytes 1131 * to achieve word alignment. This is slightly more efficient 1132 * than allocating a new buffer, copying the contents, and 1133 * discarding the old buffer.
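* A sketch of the copy (see vge_fixup_rx() below, assuming
* VGE_ETHER_ALIGN is the usual 2-byte ETHER_ALIGN): every 16-bit word is
* moved down one slot and m_data is pulled back two bytes, which leaves
* the IP header longword aligned:
*
*	dst = src - 1;
*	for (i = 0; i < m->m_len / sizeof(uint16_t) + 1; i++)
*		*dst++ = *src++;
*	m->m_data -= ETHER_ALIGN;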
1134 */ 1135 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN; 1136 m_adj(m, VGE_ETHER_ALIGN); 1137 #else 1138 m->m_len = m->m_pkthdr.len = MCLBYTES; 1139 #endif 1140 1141 arg.sc = sc; 1142 arg.vge_idx = idx; 1143 arg.vge_maxsegs = 1; 1144 arg.vge_flags = 0; 1145 1146 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, 1147 sc->vge_ldata.vge_rx_dmamap[idx], m, 1148 vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT); 1149 if (error || arg.vge_maxsegs != 1) { 1150 if (n != NULL) 1151 m_freem(n); 1152 return (ENOMEM); 1153 } 1154 1155 /* 1156 * Note: the manual fails to document the fact that for 1157 * proper operation, the driver needs to replenish the RX 1158 * DMA ring 4 descriptors at a time (rather than one at a 1159 * time, like most chips). We can allocate the new buffers 1160 * but we should not set the OWN bits until we're ready 1161 * to hand back 4 of them in one shot. 1162 */ 1163 1164 #define VGE_RXCHUNK 4 1165 sc->vge_rx_consumed++; 1166 if (sc->vge_rx_consumed == VGE_RXCHUNK) { 1167 for (i = idx; i != idx - sc->vge_rx_consumed; i--) { 1168 sc->vge_ldata.vge_rx_list[i].vge_sts |= 1169 htole32(VGE_RDSTS_OWN); 1170 } 1171 sc->vge_rx_consumed = 0; 1172 } 1173 1174 sc->vge_ldata.vge_rx_mbuf[idx] = m; 1175 1176 bus_dmamap_sync(sc->vge_ldata.vge_mtag, 1177 sc->vge_ldata.vge_rx_dmamap[idx], BUS_DMASYNC_PREREAD); 1178 1179 return (0); 1180 } 1181 1182 static int 1183 vge_tx_list_init(struct vge_softc *sc) 1184 { 1185 bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ); 1186 bzero ((char *)&sc->vge_ldata.vge_tx_mbuf, 1187 (VGE_TX_DESC_CNT * sizeof(struct mbuf *))); 1188 1189 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag, 1190 sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE); 1191 sc->vge_ldata.vge_tx_prodidx = 0; 1192 sc->vge_ldata.vge_tx_considx = 0; 1193 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT; 1194 1195 return (0); 1196 } 1197 1198 static int 1199 vge_rx_list_init(struct vge_softc *sc) 1200 { 1201 int i; 1202 1203 bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ); 1204 bzero(&sc->vge_ldata.vge_rx_mbuf, 1205 VGE_RX_DESC_CNT * sizeof(struct mbuf *)); 1206 1207 sc->vge_rx_consumed = 0; 1208 1209 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1210 if (vge_newbuf(sc, i, NULL) == ENOBUFS) 1211 return (ENOBUFS); 1212 } 1213 1214 /* Flush the RX descriptors */ 1215 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag, 1216 sc->vge_ldata.vge_rx_list_map, 1217 BUS_DMASYNC_PREWRITE); 1218 1219 sc->vge_ldata.vge_rx_prodidx = 0; 1220 sc->vge_rx_consumed = 0; 1221 sc->vge_head = sc->vge_tail = NULL; 1222 return (0); 1223 } 1224 1225 #ifdef VGE_FIXUP_RX 1226 static __inline void 1227 vge_fixup_rx(struct mbuf *m) 1228 { 1229 uint16_t *src, *dst; 1230 int i; 1231 1232 src = mtod(m, uint16_t *); 1233 dst = src - 1; 1234 1235 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1236 *dst++ = *src++; 1237 1238 m->m_data -= ETHER_ALIGN; 1239 } 1240 #endif 1241 1242 /* 1243 * RX handler. We support the reception of jumbo frames that have 1244 * been fragmented across multiple 2K mbuf cluster buffers.
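* A frame larger than one cluster shows up as a run of descriptors: each
* fragment flagged VGE_RXPKT_SOF is appended to the chain rooted at
* sc->vge_head/sc->vge_tail, and the closing fragment carries the frame's
* total length, from which the tail fragment's share is recovered below.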
1245 */ 1246 static void 1247 vge_rxeof(struct vge_softc *sc, int count) 1248 { 1249 struct ifnet *ifp = &sc->arpcom.ac_if; 1250 struct mbuf *m; 1251 int i, total_len, lim = 0; 1252 struct vge_rx_desc *cur_rx; 1253 uint32_t rxstat, rxctl; 1254 1255 ASSERT_SERIALIZED(ifp->if_serializer); 1256 1257 i = sc->vge_ldata.vge_rx_prodidx; 1258 1259 /* Invalidate the descriptor memory */ 1260 1261 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag, 1262 sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD); 1263 1264 while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) { 1265 #ifdef IFPOLL_ENABLE 1266 if (count >= 0 && count-- == 0) 1267 break; 1268 #endif 1269 1270 cur_rx = &sc->vge_ldata.vge_rx_list[i]; 1271 m = sc->vge_ldata.vge_rx_mbuf[i]; 1272 total_len = VGE_RXBYTES(cur_rx); 1273 rxstat = le32toh(cur_rx->vge_sts); 1274 rxctl = le32toh(cur_rx->vge_ctl); 1275 1276 /* Invalidate the RX mbuf and unload its map */ 1277 bus_dmamap_sync(sc->vge_ldata.vge_mtag, 1278 sc->vge_ldata.vge_rx_dmamap[i], 1279 BUS_DMASYNC_POSTWRITE); 1280 bus_dmamap_unload(sc->vge_ldata.vge_mtag, 1281 sc->vge_ldata.vge_rx_dmamap[i]); 1282 1283 /* 1284 * If the 'start of frame' bit is set, this indicates 1285 * either the first fragment in a multi-fragment receive, 1286 * or an intermediate fragment. Either way, we want to 1287 * accumulate the buffers. 1288 */ 1289 if (rxstat & VGE_RXPKT_SOF) { 1290 m->m_len = MCLBYTES - VGE_ETHER_ALIGN; 1291 if (sc->vge_head == NULL) { 1292 sc->vge_head = sc->vge_tail = m; 1293 } else { 1294 m->m_flags &= ~M_PKTHDR; 1295 sc->vge_tail->m_next = m; 1296 sc->vge_tail = m; 1297 } 1298 vge_newbuf(sc, i, NULL); 1299 VGE_RX_DESC_INC(i); 1300 continue; 1301 } 1302 1303 /* 1304 * Bad/error frames will have the RXOK bit cleared. 1305 * However, there's one error case we want to allow: 1306 * if a VLAN tagged frame arrives and the chip can't 1307 * match it against the CAM filter, it considers this 1308 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1309 * We don't want to drop the frame though: our VLAN 1310 * filtering is done in software. 1311 */ 1312 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) && 1313 !(rxstat & VGE_RDSTS_CSUMERR)) { 1314 IFNET_STAT_INC(ifp, ierrors, 1); 1315 /* 1316 * If this is part of a multi-fragment packet, 1317 * discard all the pieces. 1318 */ 1319 if (sc->vge_head != NULL) { 1320 m_freem(sc->vge_head); 1321 sc->vge_head = sc->vge_tail = NULL; 1322 } 1323 vge_newbuf(sc, i, m); 1324 VGE_RX_DESC_INC(i); 1325 continue; 1326 } 1327 1328 /* 1329 * If allocating a replacement mbuf fails, 1330 * reload the current one. 1331 */ 1332 if (vge_newbuf(sc, i, NULL)) { 1333 IFNET_STAT_INC(ifp, ierrors, 1); 1334 if (sc->vge_head != NULL) { 1335 m_freem(sc->vge_head); 1336 sc->vge_head = sc->vge_tail = NULL; 1337 } 1338 vge_newbuf(sc, i, m); 1339 VGE_RX_DESC_INC(i); 1340 continue; 1341 } 1342 1343 VGE_RX_DESC_INC(i); 1344 1345 if (sc->vge_head != NULL) { 1346 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN); 1347 /* 1348 * Special case: if there are 4 bytes or less 1349 * in this buffer, the mbuf can be discarded: 1350 * the last 4 bytes are the CRC, which we don't 1351 * care about anyway.
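* A worked example, assuming 2048-byte clusters with the 2-byte
* alignment slack (2046 data bytes per buffer): a 4094-byte frame leaves
* 4094 % 2046 = 2 bytes in its final buffer. Both bytes are CRC, so the
* tail mbuf is trimmed by the other 2 CRC bytes and this last mbuf is
* freed outright.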
*/ 1353 if (m->m_len <= ETHER_CRC_LEN) { 1354 sc->vge_tail->m_len -= 1355 (ETHER_CRC_LEN - m->m_len); 1356 m_freem(m); 1357 } else { 1358 m->m_len -= ETHER_CRC_LEN; 1359 m->m_flags &= ~M_PKTHDR; 1360 sc->vge_tail->m_next = m; 1361 } 1362 m = sc->vge_head; 1363 sc->vge_head = sc->vge_tail = NULL; 1364 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1365 } else { 1366 m->m_pkthdr.len = m->m_len = 1367 (total_len - ETHER_CRC_LEN); 1368 } 1369 1370 #ifdef VGE_FIXUP_RX 1371 vge_fixup_rx(m); 1372 #endif 1373 IFNET_STAT_INC(ifp, ipackets, 1); 1374 m->m_pkthdr.rcvif = ifp; 1375 1376 /* Do RX checksumming if enabled */ 1377 if (ifp->if_capenable & IFCAP_RXCSUM) { 1378 /* Check IP header checksum */ 1379 if (rxctl & VGE_RDCTL_IPPKT) 1380 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1381 if (rxctl & VGE_RDCTL_IPCSUMOK) 1382 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1383 1384 /* Check TCP/UDP checksum */ 1385 if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) && 1386 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1387 m->m_pkthdr.csum_flags |= 1388 CSUM_DATA_VALID|CSUM_PSEUDO_HDR| 1389 CSUM_FRAG_NOT_CHECKED; 1390 m->m_pkthdr.csum_data = 0xffff; 1391 } 1392 } 1393 1394 if (rxstat & VGE_RDSTS_VTAG) { 1395 m->m_flags |= M_VLANTAG; 1396 m->m_pkthdr.ether_vlantag = 1397 ntohs((rxctl & VGE_RDCTL_VLANID)); 1398 } 1399 ifp->if_input(ifp, m, NULL, -1); 1400 1401 lim++; 1402 if (lim == VGE_RX_DESC_CNT) 1403 break; 1404 } 1405 1406 /* Flush the RX DMA ring */ 1407 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag, 1408 sc->vge_ldata.vge_rx_list_map, 1409 BUS_DMASYNC_PREWRITE); 1410 1411 sc->vge_ldata.vge_rx_prodidx = i; 1412 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim); 1413 } 1414 1415 static void 1416 vge_txeof(struct vge_softc *sc) 1417 { 1418 struct ifnet *ifp = &sc->arpcom.ac_if; 1419 uint32_t txstat; 1420 int idx; 1421 1422 idx = sc->vge_ldata.vge_tx_considx; 1423 1424 /* Invalidate the TX descriptor list */ 1425 1426 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag, 1427 sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD); 1428 1429 while (idx != sc->vge_ldata.vge_tx_prodidx) { 1430 1431 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts); 1432 if (txstat & VGE_TDSTS_OWN) 1433 break; 1434 1435 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]); 1436 sc->vge_ldata.vge_tx_mbuf[idx] = NULL; 1437 bus_dmamap_unload(sc->vge_ldata.vge_mtag, 1438 sc->vge_ldata.vge_tx_dmamap[idx]); 1439 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL)) 1440 IFNET_STAT_INC(ifp, collisions, 1); 1441 if (txstat & VGE_TDSTS_TXERR) 1442 IFNET_STAT_INC(ifp, oerrors, 1); 1443 else 1444 IFNET_STAT_INC(ifp, opackets, 1); 1445 1446 sc->vge_ldata.vge_tx_free++; 1447 VGE_TX_DESC_INC(idx); 1448 } 1449 1450 /* No changes made to the TX ring, so no flush needed */ 1451 if (idx != sc->vge_ldata.vge_tx_considx) { 1452 sc->vge_ldata.vge_tx_considx = idx; 1453 ifq_clr_oactive(&ifp->if_snd); 1454 ifp->if_timer = 0; 1455 } 1456 1457 /* 1458 * If not all descriptors have been reaped yet, 1459 * reload the timer so that we will eventually get another 1460 * interrupt that will cause us to re-enter this routine. 1461 * This is done in case the transmitter has gone idle.
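* (Writing VGE_CR1_TIMER0_ENABLE below re-arms the one-shot timer that
* vge_init() programs through VGE_SSTIMER, so even an otherwise idle
* transmitter eventually raises a timer 0 interrupt and the remaining
* descriptors get reaped.)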
1462 */ 1463 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) 1464 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1465 } 1466 1467 static void 1468 vge_tick(struct vge_softc *sc) 1469 { 1470 struct ifnet *ifp = &sc->arpcom.ac_if; 1471 struct mii_data *mii; 1472 1473 mii = device_get_softc(sc->vge_miibus); 1474 1475 mii_tick(mii); 1476 if (sc->vge_link) { 1477 if (!(mii->mii_media_status & IFM_ACTIVE)) 1478 sc->vge_link = 0; 1479 } else { 1480 if (mii->mii_media_status & IFM_ACTIVE && 1481 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1482 sc->vge_link = 1; 1483 if (!ifq_is_empty(&ifp->if_snd)) 1484 if_devstart(ifp); 1485 } 1486 } 1487 } 1488 1489 #ifdef IFPOLL_ENABLE 1490 1491 static void 1492 vge_npoll_compat(struct ifnet *ifp, void *arg __unused, int count) 1493 { 1494 struct vge_softc *sc = ifp->if_softc; 1495 1496 ASSERT_SERIALIZED(ifp->if_serializer); 1497 1498 vge_rxeof(sc, count); 1499 vge_txeof(sc); 1500 1501 if (!ifq_is_empty(&ifp->if_snd)) 1502 if_devstart(ifp); 1503 1504 /* XXX copy & paste from vge_intr */ 1505 if (sc->vge_npoll.ifpc_stcount-- == 0) { 1506 uint32_t status; 1507 1508 sc->vge_npoll.ifpc_stcount = sc->vge_npoll.ifpc_stfrac; 1509 1510 status = CSR_READ_4(sc, VGE_ISR); 1511 if (status == 0xffffffff) 1512 return; 1513 1514 if (status) 1515 CSR_WRITE_4(sc, VGE_ISR, status); 1516 1517 if (status & (VGE_ISR_TXDMA_STALL | 1518 VGE_ISR_RXDMA_STALL)) 1519 vge_init(sc); 1520 1521 if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) { 1522 IFNET_STAT_INC(ifp, ierrors, 1); 1523 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1524 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1525 } 1526 } 1527 } 1528 1529 static void 1530 vge_npoll(struct ifnet *ifp, struct ifpoll_info *info) 1531 { 1532 struct vge_softc *sc = ifp->if_softc; 1533 1534 ASSERT_SERIALIZED(ifp->if_serializer); 1535 1536 if (info != NULL) { 1537 int cpuid = sc->vge_npoll.ifpc_cpuid; 1538 1539 info->ifpi_rx[cpuid].poll_func = vge_npoll_compat; 1540 info->ifpi_rx[cpuid].arg = NULL; 1541 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 1542 1543 if (ifp->if_flags & IFF_RUNNING) 1544 vge_disable_intr(sc); 1545 ifq_set_cpuid(&ifp->if_snd, cpuid); 1546 } else { 1547 if (ifp->if_flags & IFF_RUNNING) 1548 vge_enable_intr(sc, 0xffffffff); 1549 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->vge_irq)); 1550 } 1551 } 1552 1553 #endif /* IFPOLL_ENABLE */ 1554 1555 static void 1556 vge_intr(void *arg) 1557 { 1558 struct vge_softc *sc = arg; 1559 struct ifnet *ifp = &sc->arpcom.ac_if; 1560 uint32_t status; 1561 1562 if (sc->suspended || !(ifp->if_flags & IFF_UP)) 1563 return; 1564 1565 /* Disable interrupts */ 1566 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1567 1568 for (;;) { 1569 status = CSR_READ_4(sc, VGE_ISR); 1570 /* If the card has gone away the read returns 0xffffffff.
*/ 1571 if (status == 0xFFFFFFFF) 1572 break; 1573 1574 if (status) 1575 CSR_WRITE_4(sc, VGE_ISR, status); 1576 1577 if ((status & VGE_INTRS) == 0) 1578 break; 1579 1580 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1581 vge_rxeof(sc, -1); 1582 1583 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1584 vge_rxeof(sc, -1); 1585 IFNET_STAT_INC(ifp, ierrors, 1); 1586 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1587 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1588 } 1589 1590 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0)) 1591 vge_txeof(sc); 1592 1593 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) 1594 vge_init(sc); 1595 1596 if (status & VGE_ISR_LINKSTS) 1597 vge_tick(sc); 1598 } 1599 1600 /* Re-enable interrupts */ 1601 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1602 1603 if (!ifq_is_empty(&ifp->if_snd)) 1604 if_devstart(ifp); 1605 } 1606 1607 static int 1608 vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx) 1609 { 1610 struct vge_dmaload_arg arg; 1611 bus_dmamap_t map; 1612 int error; 1613 1614 arg.vge_flags = 0; 1615 1616 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 1617 arg.vge_flags |= VGE_TDCTL_IPCSUM; 1618 if (m_head->m_pkthdr.csum_flags & CSUM_TCP) 1619 arg.vge_flags |= VGE_TDCTL_TCPCSUM; 1620 if (m_head->m_pkthdr.csum_flags & CSUM_UDP) 1621 arg.vge_flags |= VGE_TDCTL_UDPCSUM; 1622 1623 arg.sc = sc; 1624 arg.vge_idx = idx; 1625 arg.vge_m0 = m_head; 1626 arg.vge_maxsegs = VGE_TX_FRAGS; 1627 1628 map = sc->vge_ldata.vge_tx_dmamap[idx]; 1629 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head, 1630 vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT); 1631 if (error && error != EFBIG) { 1632 if_printf(&sc->arpcom.ac_if, "can't map mbuf (error %d)\n", 1633 error); 1634 goto fail; 1635 } 1636 1637 /* Too many segments to map, coalesce into a single mbuf */ 1638 if (error || arg.vge_maxsegs == 0) { 1639 struct mbuf *m_new; 1640 1641 m_new = m_defrag(m_head, MB_DONTWAIT); 1642 if (m_new == NULL) { 1643 error = ENOBUFS; 1644 goto fail; 1645 } else { 1646 m_head = m_new; 1647 } 1648 1649 arg.sc = sc; 1650 arg.vge_m0 = m_head; 1651 arg.vge_idx = idx; 1652 arg.vge_maxsegs = 1; 1653 1654 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, 1655 m_head, vge_dma_map_tx_desc, &arg, 1656 BUS_DMA_NOWAIT); 1657 if (error) { 1658 if_printf(&sc->arpcom.ac_if, 1659 "can't map mbuf (error %d)\n", error); 1660 goto fail; 1661 } 1662 } 1663 1664 sc->vge_ldata.vge_tx_mbuf[idx] = m_head; 1665 sc->vge_ldata.vge_tx_free--; 1666 1667 /* 1668 * Set up hardware VLAN tagging. 1669 */ 1670 if (m_head->m_flags & M_VLANTAG) { 1671 sc->vge_ldata.vge_tx_list[idx].vge_ctl |= 1672 htole32(htons(m_head->m_pkthdr.ether_vlantag) | 1673 VGE_TDCTL_VTAG); 1674 } 1675 1676 sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN); 1677 return (0); 1678 1679 fail: 1680 m_freem(m_head); 1681 return error; 1682 } 1683 1684 /* 1685 * Main transmit routine. 
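* vge_start() drains the send queue: each packet is mapped into the ring
* by vge_encap(), linked to its predecessor by setting VGE_TXDESC_Q in
* the previous descriptor's first fragment, and the whole batch is then
* kicked off with a single VGE_TXQCSR_WAK0 write.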
1686 */ 1687 1688 static void 1689 vge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1690 { 1691 struct vge_softc *sc = ifp->if_softc; 1692 struct mbuf *m_head = NULL; 1693 int idx, pidx = 0; 1694 1695 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1696 ASSERT_SERIALIZED(ifp->if_serializer); 1697 1698 if (!sc->vge_link) { 1699 ifq_purge(&ifp->if_snd); 1700 return; 1701 } 1702 1703 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 1704 return; 1705 1706 idx = sc->vge_ldata.vge_tx_prodidx; 1707 1708 pidx = idx - 1; 1709 if (pidx < 0) 1710 pidx = VGE_TX_DESC_CNT - 1; 1711 1712 while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) { 1713 if (sc->vge_ldata.vge_tx_free <= 2) { 1714 ifq_set_oactive(&ifp->if_snd); 1715 break; 1716 } 1717 1718 m_head = ifq_dequeue(&ifp->if_snd); 1719 if (m_head == NULL) 1720 break; 1721 1722 if (vge_encap(sc, m_head, idx)) { 1723 /* If vge_encap() failed, it will free m_head for us */ 1724 ifq_set_oactive(&ifp->if_snd); 1725 break; 1726 } 1727 1728 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |= 1729 htole16(VGE_TXDESC_Q); 1730 1731 pidx = idx; 1732 VGE_TX_DESC_INC(idx); 1733 1734 /* 1735 * If there's a BPF listener, bounce a copy of this frame 1736 * to him. 1737 */ 1738 ETHER_BPF_MTAP(ifp, m_head); 1739 } 1740 1741 if (idx == sc->vge_ldata.vge_tx_prodidx) 1742 return; 1743 1744 /* Flush the TX descriptors */ 1745 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag, 1746 sc->vge_ldata.vge_tx_list_map, 1747 BUS_DMASYNC_PREWRITE); 1748 1749 /* Issue a transmit command. */ 1750 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 1751 1752 sc->vge_ldata.vge_tx_prodidx = idx; 1753 1754 /* 1755 * Use the countdown timer for interrupt moderation. 1756 * 'TX done' interrupts are disabled. Instead, we reset the 1757 * countdown timer, which will begin counting until it hits 1758 * the value in the SSTIMER register, and then trigger an 1759 * interrupt. Each time we set the TIMER0_ENABLE bit, the 1760 * timer count is reloaded. Only when the transmitter 1761 * is idle will the timer hit 0 and an interrupt fire. 1762 */ 1763 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1764 1765 /* 1766 * Set a timeout in case the chip goes out to lunch. 1767 */ 1768 ifp->if_timer = 5; 1769 } 1770 1771 static void 1772 vge_init(void *xsc) 1773 { 1774 struct vge_softc *sc = xsc; 1775 struct ifnet *ifp = &sc->arpcom.ac_if; 1776 struct mii_data *mii; 1777 int i; 1778 1779 ASSERT_SERIALIZED(ifp->if_serializer); 1780 1781 mii = device_get_softc(sc->vge_miibus); 1782 1783 /* 1784 * Cancel pending I/O and free all RX/TX buffers. 1785 */ 1786 vge_stop(sc); 1787 vge_reset(sc); 1788 1789 /* 1790 * Initialize the RX and TX descriptors and mbufs. 1791 */ 1792 vge_rx_list_init(sc); 1793 vge_tx_list_init(sc); 1794 1795 /* Set our station address */ 1796 for (i = 0; i < ETHER_ADDR_LEN; i++) 1797 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(ifp)[i]); 1798 1799 /* 1800 * Set receive FIFO threshold. Also allow transmission and 1801 * reception of VLAN tagged frames.
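* (The CSR_CLRBIT_1()/CSR_SETBIT_1() pairs below are read-modify-write
* sequences: the first clears a whole multi-bit field such as the FIFO
* threshold, the second ors in the new setting.)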
1802 */ 1803 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); 1804 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2); 1805 1806 /* Set DMA burst length */ 1807 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 1808 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 1809 1810 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 1811 1812 /* Set collision backoff algorithm */ 1813 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 1814 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 1815 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 1816 1817 /* Disable LPSEL field in priority resolution */ 1818 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 1819 1820 /* 1821 * Load the addresses of the DMA queues into the chip. 1822 * Note that we only use one transmit queue. 1823 */ 1824 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 1825 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr)); 1826 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 1827 1828 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 1829 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr)); 1830 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 1831 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 1832 1833 /* Enable and wake up the RX descriptor queue */ 1834 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1835 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1836 1837 /* Enable the TX descriptor queue */ 1838 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 1839 1840 /* Set up the receive filter -- allow large frames for VLANs. */ 1841 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT); 1842 1843 /* If we want promiscuous mode, set the allframes bit. */ 1844 if (ifp->if_flags & IFF_PROMISC) 1845 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 1846 1847 /* Set capture broadcast bit to capture broadcast frames. */ 1848 if (ifp->if_flags & IFF_BROADCAST) 1849 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); 1850 1851 /* Set multicast bit to capture multicast frames. */ 1852 if (ifp->if_flags & IFF_MULTICAST) 1853 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); 1854 1855 /* Init the cam filter. */ 1856 vge_cam_clear(sc); 1857 1858 /* Init the multicast filter. */ 1859 vge_setmulti(sc); 1860 1861 /* Enable flow control */ 1862 1863 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 1864 1865 /* Enable jumbo frame reception (if desired) */ 1866 1867 /* Start the MAC. */ 1868 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 1869 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 1870 CSR_WRITE_1(sc, VGE_CRS0, 1871 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 1872 1873 /* 1874 * Configure one-shot timer for microsecond 1875 * resolution and load it for 400 usecs. 1876 */ 1877 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); 1878 CSR_WRITE_2(sc, VGE_SSTIMER, 400); 1879 1880 /* 1881 * Configure interrupt moderation for receive. Enable 1882 * the holdoff counter and load it, and set the RX 1883 * suppression count to the number of descriptors we 1884 * want to allow before triggering an interrupt. 1885 * The holdoff timer is in units of 20 usecs. 1886 */ 1887 1888 #ifdef notyet 1889 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); 1890 /* Select the interrupt holdoff timer page. */ 1891 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1892 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 1893 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ 1894 1895 /* Enable use of the holdoff timer.
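* (The holdoff counter ticks once per 20 usecs, so the value 10 written
* to VGE_INTHOLDOFF above comes to roughly 200 usecs between interrupts.)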
*/ 1896 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 1897 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD); 1898 1899 /* Select the RX suppression threshold page. */ 1900 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1901 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 1902 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */ 1903 1904 /* Restore the page select bits. */ 1905 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1906 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 1907 #endif 1908 1909 #ifdef IFPOLL_ENABLE 1910 /* Disable intr if polling(4) is enabled */ 1911 if (ifp->if_flags & IFF_NPOLLING) 1912 vge_disable_intr(sc); 1913 else 1914 #endif 1915 vge_enable_intr(sc, 0); 1916 1917 mii_mediachg(mii); 1918 1919 ifp->if_flags |= IFF_RUNNING; 1920 ifq_clr_oactive(&ifp->if_snd); 1921 1922 sc->vge_if_flags = 0; 1923 sc->vge_link = 0; 1924 } 1925 1926 /* 1927 * Set media options. 1928 */ 1929 static int 1930 vge_ifmedia_upd(struct ifnet *ifp) 1931 { 1932 struct vge_softc *sc = ifp->if_softc; 1933 struct mii_data *mii = device_get_softc(sc->vge_miibus); 1934 1935 mii_mediachg(mii); 1936 1937 return (0); 1938 } 1939 1940 /* 1941 * Report current media status. 1942 */ 1943 static void 1944 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1945 { 1946 struct vge_softc *sc = ifp->if_softc; 1947 struct mii_data *mii = device_get_softc(sc->vge_miibus); 1948 1949 mii_pollstat(mii); 1950 ifmr->ifm_active = mii->mii_media_active; 1951 ifmr->ifm_status = mii->mii_media_status; 1952 } 1953 1954 static void 1955 vge_miibus_statchg(device_t dev) 1956 { 1957 struct vge_softc *sc; 1958 struct mii_data *mii; 1959 struct ifmedia_entry *ife; 1960 1961 sc = device_get_softc(dev); 1962 mii = device_get_softc(sc->vge_miibus); 1963 ife = mii->mii_media.ifm_cur; 1964 1965 /* 1966 * If the user manually selects a media mode, we need to turn 1967 * on the forced MAC mode bit in the DIAGCTL register. If the 1968 * user happens to choose a full duplex mode, we also need to 1969 * set the 'force full duplex' bit. This applies only to 1970 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 1971 * mode is disabled, and in 1000baseT mode, full duplex is 1972 * always implied, so we turn on the forced mode bit but leave 1973 * the FDX bit cleared. 
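* In table form:
*
*	media		MACFORCE	FDXFORCE
*	autoselect	off		off
*	1000baseT	on		off
*	100/10		on		on iff IFM_FDX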
1974 */ 1975 1976 switch (IFM_SUBTYPE(ife->ifm_media)) { 1977 case IFM_AUTO: 1978 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1979 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1980 break; 1981 case IFM_1000_T: 1982 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1983 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1984 break; 1985 case IFM_100_TX: 1986 case IFM_10_T: 1987 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1988 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) 1989 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1990 else 1991 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1992 break; 1993 default: 1994 device_printf(dev, "unknown media type: %x\n", 1995 IFM_SUBTYPE(ife->ifm_media)); 1996 break; 1997 } 1998 } 1999 2000 static int 2001 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 2002 { 2003 struct vge_softc *sc = ifp->if_softc; 2004 struct ifreq *ifr = (struct ifreq *)data; 2005 struct mii_data *mii; 2006 int error = 0; 2007 2008 switch (command) { 2009 case SIOCSIFMTU: 2010 if (ifr->ifr_mtu > VGE_JUMBO_MTU) 2011 error = EINVAL; 2012 else ifp->if_mtu = ifr->ifr_mtu; 2013 break; 2014 case SIOCSIFFLAGS: 2015 if (ifp->if_flags & IFF_UP) { 2016 if ((ifp->if_flags & IFF_RUNNING) && 2017 (ifp->if_flags & IFF_PROMISC) && 2018 !(sc->vge_if_flags & IFF_PROMISC)) { 2019 CSR_SETBIT_1(sc, VGE_RXCTL, 2020 VGE_RXCTL_RX_PROMISC); 2021 vge_setmulti(sc); 2022 } else if ((ifp->if_flags & IFF_RUNNING) && 2023 !(ifp->if_flags & IFF_PROMISC) && 2024 (sc->vge_if_flags & IFF_PROMISC)) { 2025 CSR_CLRBIT_1(sc, VGE_RXCTL, 2026 VGE_RXCTL_RX_PROMISC); 2027 vge_setmulti(sc); 2028 } else { 2029 vge_init(sc); 2030 } 2031 } else { 2032 if (ifp->if_flags & IFF_RUNNING) 2033 vge_stop(sc); 2034 } 2035 sc->vge_if_flags = ifp->if_flags; 2036 break; 2037 case SIOCADDMULTI: 2038 case SIOCDELMULTI: 2039 vge_setmulti(sc); 2040 break; 2041 case SIOCGIFMEDIA: 2042 case SIOCSIFMEDIA: 2043 mii = device_get_softc(sc->vge_miibus); 2044 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2045 break; 2046 case SIOCSIFCAP: 2047 { 2048 uint32_t mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2049 2050 if (mask & IFCAP_HWCSUM) { 2051 ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM); 2052 if (ifp->if_capenable & IFCAP_TXCSUM) 2053 ifp->if_hwassist = VGE_CSUM_FEATURES; 2054 else 2055 ifp->if_hwassist = 0; 2056 if (ifp->if_flags & IFF_RUNNING) 2057 vge_init(sc); 2058 } 2059 } 2060 break; 2061 default: 2062 error = ether_ioctl(ifp, command, data); 2063 break; 2064 } 2065 return (error); 2066 } 2067 2068 static void 2069 vge_watchdog(struct ifnet *ifp) 2070 { 2071 struct vge_softc *sc = ifp->if_softc; 2072 2073 if_printf(ifp, "watchdog timeout\n"); 2074 IFNET_STAT_INC(ifp, oerrors, 1); 2075 2076 vge_txeof(sc); 2077 vge_rxeof(sc, -1); 2078 2079 vge_init(sc); 2080 } 2081 2082 /* 2083 * Stop the adapter and free any mbufs allocated to the 2084 * RX and TX lists.
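* Interrupts are masked and the RX/TX queues force-stopped before any
* buffer is unloaded, so the chip cannot DMA into an mbuf that is being
* freed.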
2085 */ 2086 static void 2087 vge_stop(struct vge_softc *sc) 2088 { 2089 struct ifnet *ifp = &sc->arpcom.ac_if; 2090 int i; 2091 2092 ASSERT_SERIALIZED(ifp->if_serializer); 2093 2094 ifp->if_timer = 0; 2095 2096 ifp->if_flags &= ~IFF_RUNNING; 2097 ifq_clr_oactive(&ifp->if_snd); 2098 2099 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2100 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2101 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2102 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2103 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2104 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2105 2106 if (sc->vge_head != NULL) { 2107 m_freem(sc->vge_head); 2108 sc->vge_head = sc->vge_tail = NULL; 2109 } 2110 2111 /* Free the TX list buffers. */ 2112 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 2113 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) { 2114 bus_dmamap_unload(sc->vge_ldata.vge_mtag, 2115 sc->vge_ldata.vge_tx_dmamap[i]); 2116 m_freem(sc->vge_ldata.vge_tx_mbuf[i]); 2117 sc->vge_ldata.vge_tx_mbuf[i] = NULL; 2118 } 2119 } 2120 2121 /* Free the RX list buffers. */ 2122 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 2123 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) { 2124 bus_dmamap_unload(sc->vge_ldata.vge_mtag, 2125 sc->vge_ldata.vge_rx_dmamap[i]); 2126 m_freem(sc->vge_ldata.vge_rx_mbuf[i]); 2127 sc->vge_ldata.vge_rx_mbuf[i] = NULL; 2128 } 2129 } 2130 } 2131 2132 /* 2133 * Device suspend routine. Stop the interface and save some PCI 2134 * settings in case the BIOS doesn't restore them properly on 2135 * resume. 2136 */ 2137 static int 2138 vge_suspend(device_t dev) 2139 { 2140 struct vge_softc *sc = device_get_softc(dev); 2141 struct ifnet *ifp = &sc->arpcom.ac_if; 2142 2143 lwkt_serialize_enter(ifp->if_serializer); 2144 vge_stop(sc); 2145 sc->suspended = 1; 2146 lwkt_serialize_exit(ifp->if_serializer); 2147 2148 return (0); 2149 } 2150 2151 /* 2152 * Device resume routine. Restore some PCI settings in case the BIOS 2153 * doesn't, re-enable busmastering, and restart the interface if 2154 * appropriate. 2155 */ 2156 static int 2157 vge_resume(device_t dev) 2158 { 2159 struct vge_softc *sc = device_get_softc(dev); 2160 struct ifnet *ifp = &sc->arpcom.ac_if; 2161 2162 /* reenable busmastering */ 2163 pci_enable_busmaster(dev); 2164 pci_enable_io(dev, SYS_RES_MEMORY); 2165 2166 lwkt_serialize_enter(ifp->if_serializer); 2167 /* reinitialize interface if necessary */ 2168 if (ifp->if_flags & IFF_UP) 2169 vge_init(sc); 2170 2171 sc->suspended = 0; 2172 lwkt_serialize_exit(ifp->if_serializer); 2173 2174 return (0); 2175 } 2176 2177 /* 2178 * Stop all chip I/O so that the kernel's probe routines don't 2179 * get confused by errant DMAs when rebooting. 2180 */ 2181 static void 2182 vge_shutdown(device_t dev) 2183 { 2184 struct vge_softc *sc = device_get_softc(dev); 2185 struct ifnet *ifp = &sc->arpcom.ac_if; 2186 2187 lwkt_serialize_enter(ifp->if_serializer); 2188 vge_stop(sc); 2189 lwkt_serialize_exit(ifp->if_serializer); 2190 } 2191 2192 static void 2193 vge_enable_intr(struct vge_softc *sc, uint32_t isr) 2194 { 2195 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2196 CSR_WRITE_4(sc, VGE_ISR, isr); 2197 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2198 } 2199 2200 #ifdef IFPOLL_ENABLE 2201 2202 static void 2203 vge_disable_intr(struct vge_softc *sc) 2204 { 2205 CSR_WRITE_4(sc, VGE_IMR, 0); 2206 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2207 sc->vge_npoll.ifpc_stcount = 0; 2208 } 2209 2210 #endif /* IFPOLL_ENABLE */ 2211