/*	$NetBSD: if_vge.c,v 1.51 2010/04/05 07:20:27 joerg Exp $	*/

/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.51 2010/04/05 07:20:27 joerg Exp $");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames of more than 2 or 3 fragments.
 * The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */


#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <sys/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vgereg.h>

#define VGE_IFQ_MAXLEN		64

#define VGE_RING_ALIGN		256

#define VGE_NTXDESC		256
#define VGE_NTXDESC_MASK	(VGE_NTXDESC - 1)
#define VGE_NEXT_TXDESC(x)	((x + 1) & VGE_NTXDESC_MASK)
#define VGE_PREV_TXDESC(x)	((x - 1) & VGE_NTXDESC_MASK)

#define VGE_NRXDESC		256	/* Must be a multiple of 4!! */
#define VGE_NRXDESC_MASK	(VGE_NRXDESC - 1)
#define VGE_NEXT_RXDESC(x)	((x + 1) & VGE_NRXDESC_MASK)
#define VGE_PREV_RXDESC(x)	((x - 1) & VGE_NRXDESC_MASK)

#define VGE_ADDR_LO(y)		((uint64_t)(y) & 0xFFFFFFFF)
#define VGE_ADDR_HI(y)		((uint64_t)(y) >> 32)
#define VGE_BUFLEN(y)		((y) & 0x7FFF)
#define ETHER_PAD_LEN		(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define VGE_POWER_MANAGEMENT	0	/* disabled for now */

/*
 * Mbuf adjust factor to force 32-bit alignment of IP header.
 * Drivers should pad ETHER_ALIGN bytes when setting up a
 * RX mbuf so the upper layers get the IP header properly aligned
 * past the 14-byte Ethernet header.
 *
 * See also comment in vge_encap().
 */
#define ETHER_ALIGN		2

#ifdef __NO_STRICT_ALIGNMENT
#define VGE_RX_BUFSIZE		MCLBYTES
#else
#define VGE_RX_PAD		sizeof(uint32_t)
#define VGE_RX_BUFSIZE		(MCLBYTES - VGE_RX_PAD)
#endif

/*
 * Control structures are DMA'd to the vge chip. We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct vge_control_data {
	/* TX descriptors */
	struct vge_txdesc	vcd_txdescs[VGE_NTXDESC];
	/* RX descriptors */
	struct vge_rxdesc	vcd_rxdescs[VGE_NRXDESC];
	/* dummy data for TX padding */
	uint8_t			vcd_pad[ETHER_PAD_LEN];
};

#define VGE_CDOFF(x)	offsetof(struct vge_control_data, x)
#define VGE_CDTXOFF(x)	VGE_CDOFF(vcd_txdescs[(x)])
#define VGE_CDRXOFF(x)	VGE_CDOFF(vcd_rxdescs[(x)])
#define VGE_CDPADOFF()	VGE_CDOFF(vcd_pad[0])

/*
 * Software state for TX jobs.
 */
struct vge_txsoft {
	struct mbuf	*txs_mbuf;	/* head of our mbuf chain */
	bus_dmamap_t	txs_dmamap;	/* our DMA map */
};

/*
 * Software state for RX jobs.
 */
struct vge_rxsoft {
	struct mbuf	*rxs_mbuf;	/* head of our mbuf chain */
	bus_dmamap_t	rxs_dmamap;	/* our DMA map */
};


struct vge_softc {
	device_t		sc_dev;

	bus_space_tag_t		sc_bst;		/* bus space tag */
	bus_space_handle_t	sc_bsh;		/* bus space handle */
	bus_dma_tag_t		sc_dmat;

	struct ethercom		sc_ethercom;	/* interface info */
	uint8_t			sc_eaddr[ETHER_ADDR_LEN];

	void			*sc_intrhand;
	struct mii_data		sc_mii;
	uint8_t			sc_type;
	int			sc_if_flags;
	int			sc_link;
	int			sc_camidx;
	callout_t		sc_timeout;

	bus_dmamap_t		sc_cddmamap;
#define sc_cddma		sc_cddmamap->dm_segs[0].ds_addr

	struct vge_txsoft	sc_txsoft[VGE_NTXDESC];
	struct vge_rxsoft	sc_rxsoft[VGE_NRXDESC];
	struct vge_control_data	*sc_control_data;
#define sc_txdescs		sc_control_data->vcd_txdescs
#define sc_rxdescs		sc_control_data->vcd_rxdescs

	int			sc_tx_prodidx;
	int			sc_tx_considx;
	int			sc_tx_free;

	struct mbuf		*sc_rx_mhead;
	struct mbuf		*sc_rx_mtail;
	int			sc_rx_prodidx;
	int			sc_rx_consumed;

	int			sc_suspended;	/* 0 = normal  1 = suspended */
	uint32_t		sc_saved_maps[5];	/* pci data */
	uint32_t		sc_saved_biosaddr;
	uint8_t			sc_saved_intline;
	uint8_t			sc_saved_cachelnsz;
	uint8_t			sc_saved_lattimer;
};

#define VGE_CDTXADDR(sc, x)	((sc)->sc_cddma + VGE_CDTXOFF(x))
#define VGE_CDRXADDR(sc, x)	((sc)->sc_cddma + VGE_CDRXOFF(x))
#define VGE_CDPADADDR(sc)	((sc)->sc_cddma + VGE_CDPADOFF())

#define VGE_TXDESCSYNC(sc, idx, ops)					\
	bus_dmamap_sync((sc)->sc_dmat,(sc)->sc_cddmamap,		\
	    VGE_CDTXOFF(idx),						\
	    offsetof(struct vge_txdesc, td_frag[0]),			\
	    (ops))
#define VGE_TXFRAGSYNC(sc, idx, nsegs, ops)				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    VGE_CDTXOFF(idx) +						\
	    offsetof(struct vge_txdesc, td_frag[0]),			\
	    sizeof(struct vge_txfrag) * (nsegs),			\
	    (ops))
#define VGE_RXDESCSYNC(sc, idx, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    VGE_CDRXOFF(idx),						\
	    sizeof(struct vge_rxdesc),					\
	    (ops))

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

#define CSR_READ_4(sc, reg)		\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define CSR_READ_2(sc, reg)		\
	bus_space_read_2((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define CSR_READ_1(sc, reg)		\
	bus_space_read_1((sc)->sc_bst, (sc)->sc_bsh, (reg))

#define CSR_SETBIT_1(sc, reg, x)	\
	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x))
#define CSR_SETBIT_2(sc, reg, x)	\
	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) | (x))
#define CSR_SETBIT_4(sc, reg, x)	\
	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) | (x))

#define CSR_CLRBIT_1(sc, reg, x)	\
	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x))
#define CSR_CLRBIT_2(sc, reg, x)	\
	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) & ~(x))
#define CSR_CLRBIT_4(sc, reg, x)	\
	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) & ~(x))

#define VGE_TIMEOUT		10000

#define VGE_PCI_LOIO		0x10
#define VGE_PCI_LOMEM		0x14

static inline void vge_set_txaddr(struct vge_txfrag *, bus_addr_t);
static inline void
vge_set_rxaddr(struct vge_rxdesc *, bus_addr_t);

static int vge_ifflags_cb(struct ethercom *);

static int vge_match(device_t, cfdata_t, void *);
static void vge_attach(device_t, device_t, void *);

static int vge_encap(struct vge_softc *, struct mbuf *, int);

static int vge_allocmem(struct vge_softc *);
static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
#ifndef __NO_STRICT_ALIGNMENT
static inline void vge_fixup_rx(struct mbuf *);
#endif
static void vge_rxeof(struct vge_softc *);
static void vge_txeof(struct vge_softc *);
static int vge_intr(void *);
static void vge_tick(void *);
static void vge_start(struct ifnet *);
static int vge_ioctl(struct ifnet *, u_long, void *);
static int vge_init(struct ifnet *);
static void vge_stop(struct ifnet *, int);
static void vge_watchdog(struct ifnet *);
#if VGE_POWER_MANAGEMENT
static int vge_suspend(device_t);
static int vge_resume(device_t);
#endif
static bool vge_shutdown(device_t, int);

static uint16_t vge_read_eeprom(struct vge_softc *, int);

static void vge_miipoll_start(struct vge_softc *);
static void vge_miipoll_stop(struct vge_softc *);
static int vge_miibus_readreg(device_t, int, int);
static void vge_miibus_writereg(device_t, int, int, int);
static void vge_miibus_statchg(device_t);

static void vge_cam_clear(struct vge_softc *);
static int vge_cam_set(struct vge_softc *, uint8_t *);
static void vge_setmulti(struct vge_softc *);
static void vge_reset(struct vge_softc *);

CFATTACH_DECL_NEW(vge, sizeof(struct vge_softc),
    vge_match, vge_attach, NULL, NULL);

static inline void
vge_set_txaddr(struct vge_txfrag *f, bus_addr_t daddr)
{

	f->tf_addrlo = htole32((uint32_t)daddr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		f->tf_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
	else
		f->tf_addrhi = 0;
}

static inline void
vge_set_rxaddr(struct vge_rxdesc *rxd, bus_addr_t daddr)
{

	rxd->rd_addrlo = htole32((uint32_t)daddr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		rxd->rd_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
	else
		rxd->rd_addrhi = 0;
}

/*
 * Defragment mbuf chain contents to be as linear as possible.
 * Returns new mbuf chain on success, NULL on failure. Old mbuf
 * chain is always freed.
 * XXX temporary until there is a generic function doing this.
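 * (The copy below is assembled with m_copydata(), one header mbuf or
 * cluster at a time, so the result is as linear as the packet length
 * allows.)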
 */
#define m_defrag	vge_m_defrag
struct mbuf * vge_m_defrag(struct mbuf *, int);

struct mbuf *
vge_m_defrag(struct mbuf *mold, int flags)
{
	struct mbuf *m0, *mn, *n;
	size_t sz = mold->m_pkthdr.len;

#ifdef DIAGNOSTIC
	if ((mold->m_flags & M_PKTHDR) == 0)
		panic("m_defrag: not a mbuf chain header");
#endif

	MGETHDR(m0, flags, MT_DATA);
	if (m0 == NULL)
		return NULL;
	m0->m_pkthdr.len = mold->m_pkthdr.len;
	mn = m0;

	do {
		if (sz > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(m0);
				return NULL;
			}
		}

		mn->m_len = MIN(sz, MCLBYTES);

		m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
		    mtod(mn, void *));

		sz -= mn->m_len;

		if (sz > 0) {
			/* need more mbufs */
			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL) {
				m_freem(m0);
				return NULL;
			}

			mn->m_next = n;
			mn = n;
		}
	} while (sz > 0);

	return m0;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static uint16_t
vge_read_eeprom(struct vge_softc *sc, int addr)
{
	int i;
	uint16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM read timed out\n", device_xname(sc->sc_dev));
		return 0;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	return word;
}

static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    device_xname(sc->sc_dev));
	}
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    device_xname(sc->sc_dev));
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started.
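	 * Autopoll is running once the MII state machine is no longer
	 * idle, i.e. once the IIDL bit in VGE_MIISTS reads as zero,
	 * which is what the loop below waits for.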
	 */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to start MII autopoll\n",
		    device_xname(sc->sc_dev));
	}
}

static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i, s;
	uint16_t rval;

	sc = device_private(dev);
	rval = 0;
	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return 0;

	s = splnet();
	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: MII read timed out\n", device_xname(sc->sc_dev));
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	splx(s);

	return rval;
}

static void
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, s;

	sc = device_private(dev);
	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return;

	s = splnet();
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: MII write timed out\n", device_xname(sc->sc_dev));
	}

	vge_miipoll_start(sc);
	splx(s);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->sc_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error;

	error = 0;

	if (sc->sc_camidx == VGE_CAM_MAXADDRS)
		return ENOSPC;

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | sc->sc_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command.
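	 * The chip clears VGE_CAMCTL_WRITE once the CAM update has
	 * completed; the loop below polls for that.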
	 */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: setting CAM filter failed\n",
		    device_xname(sc->sc_dev));
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->sc_camidx / 8),
	    1 << (sc->sc_camidx & 7));

	sc->sc_camidx++;

 fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return error;
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp;
	int error;
	uint32_t h, hashes[2] = { 0, 0 };
	struct ether_multi *enm;
	struct ether_multistep step;

	error = 0;
	ifp = &sc->sc_ethercom.ec_if;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/* Now program new ones */
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		/*
		 * If multicast range, fall back to ALLMULTI.
		 */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		error = vge_cam_set(sc, enm->enm_addrlo);
		if (error)
			break;

		ETHER_NEXT_MULTI(step, enm);
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			/*
			 * If multicast range, fall back to ALLMULTI.
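			 * (A range of addresses cannot be matched by
			 * individual CAM entries.)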
			 */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto allmulti;

			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;
			hashes[h >> 5] |= 1 << (h & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
}

static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: soft reset timed out\n", device_xname(sc->sc_dev));
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM reload timed out\n",
		    device_xname(sc->sc_dev));
		return;
	}

	/*
	 * On some machines, the first data read from the EEPROM can be
	 * garbled, so do one dummy read here to get past it.
	 */
	(void)vge_read_eeprom(sc, 0);

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
	    && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
		return 1;

	return 0;
}

static int
vge_allocmem(struct vge_softc *sc)
{
	int error;
	int nseg;
	int i;
	bus_dma_segment_t seg;

	/*
	 * Allocate memory for control data.
	 */

	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vge_control_data),
	    VGE_RING_ALIGN, 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate control data dma memory\n");
		goto fail_1;
	}

	/* Map the memory to kernel VA space */

	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct vge_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not map control data dma memory\n");
		goto fail_2;
	}
	memset(sc->sc_control_data, 0, sizeof(struct vge_control_data));

	/*
	 * Create map for control data.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct vge_control_data), 1,
	    sizeof(struct vge_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not create control data dmamap\n");
		goto fail_3;
	}

	/* Load the map for the control data.
	 */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct vge_control_data), NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not load control data dma memory\n");
		goto fail_4;
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_NTXDESC; i++) {
		error = bus_dmamap_create(sc->sc_dmat, VGE_TX_MAXLEN,
		    VGE_TX_FRAGS, VGE_TX_MAXLEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_5;
		}
	}

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_NRXDESC; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX descs\n");
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	return 0;

 fail_6:
	for (i = 0; i < VGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < VGE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct vge_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_1:
	return ENOMEM;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vge_attach(device_t parent, device_t self, void *aux)
{
	uint8_t *eaddr;
	struct vge_softc *sc = device_private(self);
	struct ifnet *ifp;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const char *intrstr;
	pci_intr_handle_t ih;
	uint16_t val;

	sc->sc_dev = self;

	aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
	    PCI_REVISION(pa->pa_class));

	/* Make sure bus-mastering is enabled */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bst, &sc->sc_bsh, NULL, NULL) != 0) {
		aprint_error_dev(self, "couldn't map memory\n");
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
	if (sc->sc_intrhand == NULL) {
		aprint_error_dev(self, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
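	 * It is stored as three 16-bit words starting at VGE_EE_EADDR,
	 * each holding two address bytes in little-endian order.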
	 */
	eaddr = sc->sc_eaddr;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	aprint_normal_dev(self, "Ethernet address: %s\n",
	    ether_sprintf(eaddr));

	/*
	 * Use the 32bit tag. Hardware supports 48bit physical addresses,
	 * but we don't use that for now.
	 */
	sc->sc_dmat = pa->pa_dmat;

	if (vge_allocmem(sc) != 0)
		return;

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(1);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_start = vge_start;
	ifp->if_init = vge_init;
	ifp->if_stop = vge_stop;

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

#ifdef DEVICE_POLLING
#ifdef IFCAP_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
#endif
	ifp->if_watchdog = vge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vge_miibus_readreg;
	sc->sc_mii.mii_writereg = vge_miibus_writereg;
	sc->sc_mii.mii_statchg = vge_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, vge_ifflags_cb);

	callout_init(&sc->sc_timeout, 0);
	callout_setfunc(&sc->sc_timeout, vge_tick, sc);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(self, NULL, NULL, vge_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}

static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *m_new;
	struct vge_rxdesc *rxd;
	struct vge_rxsoft *rxs;
	bus_dmamap_t map;
	int i;
#ifdef DIAGNOSTIC
	uint32_t rd_sts;
#endif

	m_new = NULL;
	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if ((m_new->m_flags & M_EXT) == 0) {
			m_freem(m_new);
			return ENOBUFS;
		}

		m = m_new;
	} else
		m->m_data = m->m_ext.ext_buf;


	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = VGE_RX_BUFSIZE;
#ifndef __NO_STRICT_ALIGNMENT
	m->m_data += VGE_RX_PAD;
#endif
	rxs = &sc->sc_rxsoft[idx];
	map = rxs->rxs_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0)
		goto out;

	rxd = &sc->sc_rxdescs[idx];

#ifdef DIAGNOSTIC
	/* If this descriptor is still owned by the chip, bail. */
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	rd_sts = le32toh(rxd->rd_sts);
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (rd_sts & VGE_RDSTS_OWN) {
		panic("%s: tried to map busy RX descriptor",
		    device_xname(sc->sc_dev));
	}
#endif

	rxs->rxs_mbuf = m;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	rxd->rd_buflen =
	    htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
	vge_set_rxaddr(rxd, map->dm_segs[0].ds_addr);
	rxd->rd_sts = 0;
	rxd->rd_ctl = 0;
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
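	 * (With VGE_RXCHUNK == 4 below, once the fourth buffer of a chunk
	 * has been queued we set OWN on all four descriptors, working
	 * backwards so that the oldest descriptor, which the chip polls
	 * first, is handed back last.)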
	 */

#define VGE_RXCHUNK 4
	sc->sc_rx_consumed++;
	if (sc->sc_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - VGE_RXCHUNK; i--) {
			KASSERT(i >= 0);
			sc->sc_rxdescs[i].rd_sts |= htole32(VGE_RDSTS_OWN);
			VGE_RXDESCSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
		sc->sc_rx_consumed = 0;
	}

	return 0;
 out:
	if (m_new != NULL)
		m_freem(m_new);
	return ENOMEM;
}

#ifndef __NO_STRICT_ALIGNMENT
static inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int idx, total_len, lim;
	struct vge_rxdesc *cur_rxd;
	struct vge_rxsoft *rxs;
	uint32_t rxstat, rxctl;

	ifp = &sc->sc_ethercom.ec_if;
	lim = 0;

	/* Invalidate the descriptor memory */

	for (idx = sc->sc_rx_prodidx;; idx = VGE_NEXT_RXDESC(idx)) {
		cur_rxd = &sc->sc_rxdescs[idx];

		VGE_RXDESCSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = le32toh(cur_rxd->rd_sts);
		if ((rxstat & VGE_RDSTS_OWN) != 0) {
			VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
			break;
		}

		rxctl = le32toh(cur_rxd->rd_ctl);
		rxs = &sc->sc_rxsoft[idx];
		m = rxs->rxs_mbuf;
		total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap,
		    0, rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = VGE_RX_BUFSIZE;
			if (sc->sc_rx_mhead == NULL)
				sc->sc_rx_mhead = sc->sc_rx_mtail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->sc_rx_mtail->m_next = m;
				sc->sc_rx_mtail = m;
			}
			vge_newbuf(sc, idx, NULL);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
		    (rxstat & VGE_RDSTS_VIDM) == 0 &&
		    (rxstat & VGE_RDSTS_CSUMERR) == 0) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->sc_rx_mhead != NULL) {
				m_freem(sc->sc_rx_mhead);
				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			}
			vge_newbuf(sc, idx, m);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
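		 * (Passing the old mbuf back to vge_newbuf() re-queues
		 * it in place without allocating a replacement.)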
		 */

		if (vge_newbuf(sc, idx, NULL)) {
			ifp->if_ierrors++;
			if (sc->sc_rx_mhead != NULL) {
				m_freem(sc->sc_rx_mhead);
				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			}
			vge_newbuf(sc, idx, m);
			continue;
		}

		if (sc->sc_rx_mhead != NULL) {
			m->m_len = total_len % VGE_RX_BUFSIZE;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->sc_rx_mtail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->sc_rx_mtail->m_next = m;
			}
			m = sc->sc_rx_mhead;
			sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;

#ifndef __NO_STRICT_ALIGNMENT
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
			/* Check TCP checksum */
			if (rxctl & VGE_RDCTL_TCPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
			/* Check UDP checksum */
			if (rxctl & VGE_RDCTL_UDPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			/*
			 * We use bswap16() here because:
			 * On LE machines, tag is stored in BE as stream data.
			 * On BE machines, tag is stored in BE as stream data
			 * but it was already swapped by le32toh() above.
			 */
			VLAN_INPUT_TAG(ifp, m,
			    bswap16(rxctl & VGE_RDCTL_VLANID), continue);
		}

		/*
		 * Handle BPF listeners.
		 */
		bpf_mtap(ifp, m);

		(*ifp->if_input)(ifp, m);

		lim++;
		if (lim == VGE_NRXDESC)
			break;
	}

	sc->sc_rx_prodidx = idx;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct vge_txsoft *txs;
	uint32_t txstat;
	int idx;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx;
	    sc->sc_tx_free < VGE_NTXDESC;
	    idx = VGE_NEXT_TXDESC(idx), sc->sc_tx_free++) {
		VGE_TXDESCSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat = le32toh(sc->sc_txdescs[idx].td_sts);
		VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
		if (txstat & VGE_TDSTS_OWN) {
			break;
		}

		txs = &sc->sc_txsoft[idx];
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0,
		    txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->sc_tx_considx = idx;

	if (sc->sc_tx_free > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->sc_tx_free < VGE_NTXDESC)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
	else
		ifp->if_timer = 0;
}

static void
vge_tick(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;
	int s;

	sc = arg;
	ifp = &sc->sc_ethercom.ec_if;
	mii = &sc->sc_mii;

	s = splnet();

	callout_schedule(&sc->sc_timeout, hz);

	mii_tick(mii);
	if (sc->sc_link) {
		if ((mii->mii_media_status & IFM_ACTIVE) == 0)
			sc->sc_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->sc_link = 1;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				vge_start(ifp);
		}
	}

	splx(s);
}

static int
vge_intr(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;
	int claim;

	sc = arg;
	claim = 0;
	if (sc->sc_suspended) {
		return claim;
	}

	ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_UP) == 0) {
		return claim;
	}

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {

		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff.
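		 * (A read from a PCI device that is no longer present
		 * floats to all-ones.)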
		 */
		if (status == 0xFFFFFFFF)
			break;

		if (status) {
			claim = 1;
			CSR_WRITE_4(sc, VGE_ISR, status);
		}

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(ifp);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (claim && !IFQ_IS_EMPTY(&ifp->if_snd))
		vge_start(ifp);

	return claim;
}

static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct vge_txsoft *txs;
	struct vge_txdesc *txd;
	struct vge_txfrag *f;
	struct mbuf *m_new;
	bus_dmamap_t map;
	int m_csumflags, seg, error, flags;
	struct m_tag *mtag;
	size_t sz;
	uint32_t td_sts, td_ctl;

	KASSERT(sc->sc_tx_free > 0);

	txd = &sc->sc_txdescs[idx];

#ifdef DIAGNOSTIC
	/* If this descriptor is still owned by the chip, bail. */
	VGE_TXDESCSYNC(sc, idx,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	td_sts = le32toh(txd->td_sts);
	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (td_sts & VGE_TDSTS_OWN) {
		return ENOBUFS;
	}
#endif

	/*
	 * Preserve m_pkthdr.csum_flags here since m_head might be
	 * updated by m_defrag()
	 */
	m_csumflags = m_head->m_pkthdr.csum_flags;

	txs = &sc->sc_txsoft[idx];
	map = txs->txs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head, BUS_DMA_NOWAIT);

	/* If too many segments to map, coalesce */
	if (error == EFBIG ||
	    (m_head->m_pkthdr.len < ETHER_PAD_LEN &&
	     map->dm_nsegs == VGE_TX_FRAGS)) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return EFBIG;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
		    m_new, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m_new);
			return error;
		}

		m_head = m_new;
	} else if (error)
		return error;

	txs->txs_mbuf = m_head;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (seg = 0, f = &txd->td_frag[0]; seg < map->dm_nsegs; seg++, f++) {
		f->tf_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
		vge_set_txaddr(f, map->dm_segs[seg].ds_addr);
	}

	/* Argh. This chip does not autopad short frames */
	sz = m_head->m_pkthdr.len;
	if (sz < ETHER_PAD_LEN) {
		f->tf_buflen = htole16(VGE_BUFLEN(ETHER_PAD_LEN - sz));
		vge_set_txaddr(f, VGE_CDPADADDR(sc));
		sz = ETHER_PAD_LEN;
		seg++;
	}
	VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	seg++;

	flags = 0;
	if (m_csumflags & M_CSUM_IPv4)
		flags |= VGE_TDCTL_IPCSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		flags |= VGE_TDCTL_TCPCSUM;
	if (m_csumflags & M_CSUM_UDPv4)
		flags |= VGE_TDCTL_UDPCSUM;
	td_sts = sz << 16;
	td_ctl = flags | (seg << 28) | VGE_TD_LS_NORM;

	if (sz > ETHERMTU + ETHER_HDR_LEN)
		td_ctl |= VGE_TDCTL_JUMBO;

	/*
	 * Set up hardware VLAN tagging.
	 */
	mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
	if (mtag != NULL) {
		/*
		 * No need htons() here since vge(4) chip assumes
		 * that tags are written in little endian and
		 * we already use htole32() here.
		 */
		td_ctl |= VLAN_TAG_VALUE(mtag) | VGE_TDCTL_VTAG;
	}
	txd->td_ctl = htole32(td_ctl);
	txd->td_sts = htole32(td_sts);
	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts);
	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_tx_free--;

	return 0;
}

/*
 * Main transmit routine.
 */

static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct vge_txsoft *txs;
	struct mbuf *m_head;
	int idx, pidx, ofree, error;

	sc = ifp->if_softc;

	if (!sc->sc_link ||
	    (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
		return;
	}

	m_head = NULL;
	idx = sc->sc_tx_prodidx;
	pidx = VGE_PREV_TXDESC(idx);
	ofree = sc->sc_tx_free;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sc->sc_tx_free == 0) {
			/*
			 * All slots used, stop for now.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = &sc->sc_txsoft[idx];
		KASSERT(txs->txs_mbuf == NULL);

		if ((error = vge_encap(sc, m_head, idx))) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m_head);
				m_freem(m_head);
				continue;
			}

			/*
			 * Short on resources, just stop for now.
			 */
			if (error == ENOBUFS)
				ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		sc->sc_txdescs[pidx].td_frag[0].tf_buflen |=
		    htole16(VGE_TXDESC_Q);
		VGE_TXFRAGSYNC(sc, pidx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		if (txs->txs_mbuf != m_head) {
			m_freem(m_head);
			m_head = txs->txs_mbuf;
		}

		pidx = idx;
		idx = VGE_NEXT_TXDESC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}

	if (sc->sc_tx_free < ofree) {
		/* TX packet queued */

		sc->sc_tx_prodidx = idx;

		/* Issue a transmit command. */
		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

		/*
		 * Use the countdown timer for interrupt moderation.
		 * 'TX done' interrupts are disabled.  Instead, we reset the
		 * countdown timer, which will begin counting until it hits
		 * the value in the SSTIMER register, and then trigger an
		 * interrupt. Each time we set the TIMER0_ENABLE bit, the
		 * timer count is reloaded. Only when the transmitter
		 * is idle will the timer hit 0 and an interrupt fire.
		 */
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

static int
vge_init(struct ifnet *ifp)
{
	struct vge_softc *sc;
	int i, rc = 0;

	sc = ifp->if_softc;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(ifp, 0);
	vge_reset(sc);

	/* Initialize the RX descriptors and mbufs. */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	sc->sc_rx_consumed = 0;
	for (i = 0; i < VGE_NRXDESC; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS) {
			printf("%s: unable to allocate or map rx buffer\n",
			    device_xname(sc->sc_dev));
			return 1; /* XXX */
		}
	}
	sc->sc_rx_prodidx = 0;
	sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;

	/* Initialize the TX descriptors and mbufs. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
	    VGE_CDTXOFF(0), sizeof(sc->sc_txdescs),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	for (i = 0; i < VGE_NTXDESC; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;

	sc->sc_tx_prodidx = 0;
	sc->sc_tx_considx = 0;
	sc->sc_tx_free = VGE_NTXDESC;

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->sc_eaddr[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(VGE_CDTXADDR(sc, 0)));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_NTXDESC - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(VGE_CDRXADDR(sc, 0)));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_NRXDESC - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_NRXDESC);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */

	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 400 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	{
		/*
		 * Enable interrupts.
		 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->sc_if_flags = 0;
	sc->sc_link = 0;

	callout_schedule(&sc->sc_timeout, hz);

 out:
	return rc;
}

static void
vge_miibus_statchg(device_t self)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_private(self);
	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;
	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit.  This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		printf("%s: unknown media type: %x\n",
		    device_xname(sc->sc_dev),
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

static int
vge_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct vge_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) == 0)
		return 0;

	if ((ifp->if_flags & IFF_PROMISC) == 0)
		CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	else
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	vge_setmulti(sc);
	return 0;
}

static int
vge_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct vge_softc *sc;
	struct ifreq *ifr;
	int s, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, command, data)) == ENETRESET) {
		error = 0;
		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vge_setmulti(sc);
		}
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);
	return error;
}

static void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc;
	int s;

	sc = ifp->if_softc;
	s = splnet();
	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct ifnet *ifp, int disable)
{
	struct vge_softc *sc = ifp->if_softc;
	struct vge_txsoft *txs;
	struct vge_rxsoft *rxs;
	int i, s;

	s = splnet();
	ifp->if_timer = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->sc_rx_mhead != NULL) {
		m_freem(sc->sc_rx_mhead);
		sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
	}

	/* Free the TX list buffers.
	 */

	for (i = 0; i < VGE_NTXDESC; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < VGE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}

	splx(s);
}

#if VGE_POWER_MANAGEMENT
/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc;
	int i;

	sc = device_private(dev);

	vge_stop(&sc->sc_ethercom.ec_if, 1);

	for (i = 0; i < 5; i++)
		sc->sc_saved_maps[i] =
		    pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->sc_saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->sc_saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->sc_saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->sc_saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->sc_suspended = 1;

	return 0;
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_private(dev);
	ifp = &sc->sc_ethercom.ec_if;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4,
		    sc->sc_saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->sc_saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->sc_saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->sc_saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->sc_saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(ifp);

	sc->sc_suspended = 0;

	return 0;
}
#endif

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static bool
vge_shutdown(device_t self, int howto)
{
	struct vge_softc *sc;

	sc = device_private(self);
	vge_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}