1 /* 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2000, 2001 4 * Bill Paul <william.paul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 
32 * 33 * $FreeBSD: src/sys/dev/lge/if_lge.c,v 1.5.2.2 2001/12/14 19:49:23 jlemon Exp $ 34 * $DragonFly: src/sys/dev/netif/lge/if_lge.c,v 1.42 2008/08/17 04:32:33 sephe Exp $ 35 */ 36 37 /* 38 * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public 39 * documentation not available, but ask me nicely. 40 * 41 * Written by Bill Paul <william.paul@windriver.com> 42 * Wind River Systems 43 */ 44 45 /* 46 * The Level 1 chip is used on some D-Link, SMC and Addtron NICs. 47 * It's a 64-bit PCI part that supports TCP/IP checksum offload, 48 * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There 49 * are three supported methods for data transfer between host and 50 * NIC: programmed I/O, traditional scatter/gather DMA and Packet 51 * Propulsion Technology (tm) DMA. The latter mechanism is a form 52 * of double buffer DMA where the packet data is copied to a 53 * pre-allocated DMA buffer who's physical address has been loaded 54 * into a table at device initialization time. The rationale is that 55 * the virtual to physical address translation needed for normal 56 * scatter/gather DMA is more expensive than the data copy needed 57 * for double buffering. This may be true in Windows NT and the like, 58 * but it isn't true for us, at least on the x86 arch. This driver 59 * uses the scatter/gather I/O method for both TX and RX. 60 * 61 * The LXT1001 only supports TCP/IP checksum offload on receive. 62 * Also, the VLAN tagging is done using a 16-entry table which allows 63 * the chip to perform hardware filtering based on VLAN tags. Sadly, 64 * our vlan support doesn't currently play well with this kind of 65 * hardware support. 
66 * 67 * Special thanks to: 68 * - Jeff James at Intel, for arranging to have the LXT1001 manual 69 * released (at long last) 70 * - Beny Chen at D-Link, for actually sending it to me 71 * - Brad Short and Keith Alexis at SMC, for sending me sample 72 * SMC9462SX and SMC9462TX adapters for testing 73 * - Paul Saab at Y!, for not killing me (though it remains to be seen 74 * if in fact he did me much of a favor) 75 */ 76 77 #include <sys/param.h> 78 #include <sys/systm.h> 79 #include <sys/sockio.h> 80 #include <sys/mbuf.h> 81 #include <sys/malloc.h> 82 #include <sys/kernel.h> 83 #include <sys/interrupt.h> 84 #include <sys/socket.h> 85 #include <sys/serialize.h> 86 #include <sys/thread2.h> 87 88 #include <net/if.h> 89 #include <net/ifq_var.h> 90 #include <net/if_arp.h> 91 #include <net/ethernet.h> 92 #include <net/if_dl.h> 93 #include <net/if_media.h> 94 95 #include <net/bpf.h> 96 97 #include <vm/vm.h> /* for vtophys */ 98 #include <vm/pmap.h> /* for vtophys */ 99 #include <sys/bus.h> 100 #include <sys/rman.h> 101 102 #include <dev/netif/mii_layer/mii.h> 103 #include <dev/netif/mii_layer/miivar.h> 104 105 #include <bus/pci/pcidevs.h> 106 #include <bus/pci/pcireg.h> 107 #include <bus/pci/pcivar.h> 108 109 #define LGE_USEIOSPACE 110 111 #include "if_lgereg.h" 112 113 /* "controller miibus0" required. See GENERIC if you get errors here. */ 114 #include "miibus_if.h" 115 116 /* 117 * Various supported device vendors/types and their names. 
 */
static struct lge_type lge_devs[] = {
	{ PCI_VENDOR_LEVELONE, PCI_PRODUCT_LEVELONE_LXT1001,
	    "Level 1 Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int	lge_probe(device_t);
static int	lge_attach(device_t);
static int	lge_detach(device_t);

static int	lge_alloc_jumbo_mem(struct lge_softc *);
static void	lge_free_jumbo_mem(struct lge_softc *);
static struct lge_jslot
		*lge_jalloc(struct lge_softc *);
static void	lge_jfree(void *);
static void	lge_jref(void *);

static int	lge_newbuf(struct lge_softc *, struct lge_rx_desc *,
			   struct mbuf *);
static int	lge_encap(struct lge_softc *, struct mbuf *, uint32_t *);
static void	lge_rxeof(struct lge_softc *, int);
static void	lge_rxeoc(struct lge_softc *);
static void	lge_txeof(struct lge_softc *);
static void	lge_intr(void *);
static void	lge_tick(void *);
static void	lge_tick_serialized(void *);
static void	lge_start(struct ifnet *);
static int	lge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	lge_init(void *);
static void	lge_stop(struct lge_softc *);
static void	lge_watchdog(struct ifnet *);
static void	lge_shutdown(device_t);
static int	lge_ifmedia_upd(struct ifnet *);
static void	lge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	lge_eeprom_getword(struct lge_softc *, int, uint16_t *);
static void	lge_read_eeprom(struct lge_softc *, caddr_t, int, int);

static int	lge_miibus_readreg(device_t, int, int);
static int	lge_miibus_writereg(device_t, int, int, int);
static void	lge_miibus_statchg(device_t);

static void	lge_setmulti(struct lge_softc *);
static void	lge_reset(struct lge_softc *);
static int	lge_list_rx_init(struct lge_softc *);
static int	lge_list_tx_init(struct lge_softc *);

/*
 * Select the bus resource used for register access: I/O port space
 * when LGE_USEIOSPACE is defined (it is, above), memory-mapped
 * space otherwise.
 */
#ifdef LGE_USEIOSPACE
#define LGE_RES			SYS_RES_IOPORT
#define LGE_RID			LGE_PCI_LOIO
#else
#define LGE_RES			SYS_RES_MEMORY
#define LGE_RID			LGE_PCI_LOMEM
#endif

static device_method_t lge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		lge_probe),
	DEVMETHOD(device_attach,	lge_attach),
	DEVMETHOD(device_detach,	lge_detach),
	DEVMETHOD(device_shutdown,	lge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	lge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	lge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	lge_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(lge, lge_driver, lge_methods, sizeof(struct lge_softc));
static devclass_t lge_devclass;

DECLARE_DUMMY_MODULE(if_lge);
DRIVER_MODULE(if_lge, pci, lge_driver, lge_devclass, 0, 0);
DRIVER_MODULE(miibus, lge, miibus_driver, miibus_devclass, 0, 0);

/* Read-modify-write helpers to set/clear bits in an arbitrary CSR. */
#define LGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define LGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

/* Same, specialized to the MEAR register. */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~(x))

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
lge_eeprom_getword(struct lge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint32_t val;

	/*
	 * Issue a single-access read.  The EEPROM packs two 16-bit
	 * words per 32-bit cell, so the cell index written to the
	 * control register is addr / 2.
	 */
	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
	    LGE_EECTL_SINGLEACCESS | ((addr >> 1) << 8));

	/* Busy-wait for the chip to clear the command bit. */
	for (i = 0; i < LGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ) == 0)
			break;
	}

	/* On timeout, *dest is left untouched. */
	if (i == LGE_TIMEOUT) {
		kprintf("lge%d: EEPROM read timed out\n", sc->lge_unit);
		return;
	}

	val = CSR_READ_4(sc, LGE_EEDATA);

	/* Odd word addresses live in the upper half of the data register. */
	if (addr & 1)
		*dest = (val >> 16) & 0xFFFF;
	else
		*dest = val & 0xFFFF;
}

/*
 * Read a sequence of words from the EEPROM into 'dest',
 * byte-swapping each word from network order.
 */
static void
lge_read_eeprom(struct lge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i;
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		lge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		*ptr = ntohs(word);
	}
}

/*
 * Read a PHY register via the chip's GMII interface.
 * Returns 0 on timeout (indistinguishable from a register reading 0).
 */
static int
lge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct lge_softc *sc = device_get_softc(dev);
	int i;

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return(0);

	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	/* Wait for the read command to complete. */
	for (i = 0; i < LGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY) == 0)
			break;
	}

	if (i == LGE_TIMEOUT) {
		kprintf("lge%d: PHY read timed out\n", sc->lge_unit);
		return(0);
	}

	/* Result is returned in the upper 16 bits of the control register. */
	return(CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}

/*
 * Write a PHY register via the chip's GMII interface.
 * Always returns 0; a timeout is only reported on the console.
 */
static int
lge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct lge_softc *sc = device_get_softc(dev);
	int i;

	CSR_WRITE_4(sc, LGE_GMIICTL,
	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

	for (i = 0; i < LGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY) == 0)
			break;
	}

	if (i == LGE_TIMEOUT) {
		kprintf("lge%d: PHY write timed out\n", sc->lge_unit);
		return(0);
	}

	return(0);
}

/*
 * Media status change callback: mirror the speed and duplex the PHY
 * negotiated into the chip's GMIIMODE register.
 */
static void
lge_miibus_statchg(device_t dev)
{
	struct lge_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->lge_miibus);

	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
		 */
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	else
		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
}

/*
 * Program the 64-bit multicast hash filter (MAR0/MAR1) from the
 * interface's multicast address list.
 */
static void
lge_setmulti(struct lge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t h = 0, hashes[2] = { 0, 0 };

	/* Make sure multicast hash table is enabled. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_MCAST);

	/* Promiscuous/allmulti: accept every group address. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, LGE_MAR0, 0);
	CSR_WRITE_4(sc, LGE_MAR1, 0);

	/* now program new ones: top 6 CRC bits select one of 64 filter bits */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);

	return;
}

/*
 * Issue a software reset and wait for the chip to come back.
 */
static void
lge_reset(struct lge_softc *sc)
{
	int i;

	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0 | LGE_MODE1_SOFTRST);

	/* The chip clears the reset bit when it is done. */
	for (i = 0; i < LGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST) == 0)
			break;
	}

	if (i == LGE_TIMEOUT)
		kprintf("lge%d: reset never completed\n", sc->lge_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for a Level 1 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
lge_probe(device_t dev)
{
	struct lge_type *t;
	uint16_t vendor, product;

	vendor = pci_get_vendor(dev);
	product = pci_get_device(dev);

	/* Walk the (NULL-terminated) device table. */
	for (t = lge_devs; t->lge_name != NULL; t++) {
		if (vendor == t->lge_vid && product == t->lge_did) {
			device_set_desc(dev, t->lge_name);
			return(0);
		}
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.  On any failure, lge_detach()
 * is called to release whatever was already allocated.
 */
static int
lge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct lge_softc *sc;
	struct ifnet *ifp;
	int unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	callout_init(&sc->lge_stat_timer);
	lwkt_serialize_init(&sc->lge_jslot_serializer);

	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, LGE_PCI_LOIO, 4);
		membase = pci_read_config(dev, LGE_PCI_LOMEM, 4);
		irq = pci_read_config(dev, LGE_PCI_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/*
		 * Restore PCI config data (moving to D0 can clear
		 * the BARs and interrupt line).
		 */
		pci_write_config(dev, LGE_PCI_LOIO, iobase, 4);
		pci_write_config(dev, LGE_PCI_LOMEM, membase, 4);
		pci_write_config(dev, LGE_PCI_INTLINE, irq, 4);
	}

	pci_enable_busmaster(dev);

	/* Map registers (I/O or memory space per LGE_RES/LGE_RID). */
	rid = LGE_RID;
	sc->lge_res = bus_alloc_resource_any(dev, LGE_RES, &rid, RF_ACTIVE);

	if (sc->lge_res == NULL) {
		kprintf("lge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->lge_btag = rman_get_bustag(sc->lge_res);
	sc->lge_bhandle = rman_get_bushandle(sc->lge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->lge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->lge_irq == NULL) {
		kprintf("lge%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1);

	sc->lge_unit = unit;

	/* Descriptor lists must be physically contiguous for DMA. */
	sc->lge_ldata = contigmalloc(sizeof(struct lge_list_data), M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->lge_ldata == NULL) {
		kprintf("lge%d: no memory for list buffers!\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Try to allocate memory for jumbo buffers. */
	if (lge_alloc_jumbo_mem(sc)) {
		kprintf("lge%d: jumbo buffer allocation failed\n",
		    sc->lge_unit);
		error = ENXIO;
		goto fail;
	}

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, "lge", unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lge_ioctl;
	ifp->if_start = lge_start;
	ifp->if_watchdog = lge_watchdog;
	ifp->if_init = lge_init;
	ifp->if_baudrate = 1000000000;
	ifq_set_maxlen(&ifp->if_snd, LGE_TX_LIST_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	/* The LXT1001 only does checksum offload on receive. */
	ifp->if_capabilities = IFCAP_RXCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* Remember whether the internal PCS (TBI) block is active. */
	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->lge_miibus,
	    lge_ifmedia_upd, lge_ifmedia_sts)) {
		kprintf("lge%d: MII without any PHY!\n", sc->lge_unit);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->lge_irq, INTR_MPSAFE,
			       lge_intr, sc, &sc->lge_intrhand,
			       ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		kprintf("lge%d: couldn't set up irq\n", unit);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->lge_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return(0);

fail:
	lge_detach(dev);
	return(error);
}

/*
 * Detach the interface.  Also used as the error-unwind path of
 * lge_attach(), so every release below is guarded by a NULL check.
 */
static int
lge_detach(device_t dev)
{
	struct lge_softc *sc= device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		lge_reset(sc);
		lge_stop(sc);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->lge_miibus)
		device_delete_child(dev, sc->lge_miibus);
	bus_generic_detach(dev);

	if (sc->lge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
	if (sc->lge_res)
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);

	if (sc->lge_ldata)
		contigfree(sc->lge_ldata, sizeof(struct lge_list_data),
			   M_DEVBUF);
	lge_free_jumbo_mem(sc);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
lge_list_tx_init(struct lge_softc *sc)
{
	struct lge_list_data *ld;
	struct lge_ring_data *cd;
	int i;

	cd = &sc->lge_cdata;
	ld = sc->lge_ldata;
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		ld->lge_tx_list[i].lge_mbuf = NULL;
		ld->lge_tx_list[i].lge_ctl = 0;
	}

	cd->lge_tx_prod = cd->lge_tx_cons = 0;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
lge_list_rx_init(struct lge_softc *sc)
{
	struct lge_list_data *ld;
	struct lge_ring_data *cd;
	int i;

	ld = sc->lge_ldata;
	cd = &sc->lge_cdata;

	cd->lge_rx_prod = cd->lge_rx_cons = 0;

	/* We only use 32-bit DMA addresses; high DWORD is always 0. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);

	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		/* Stop early if the chip's RX command FIFO fills up. */
		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
			break;
		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	/* Clear possible 'rx command queue empty' interrupt. */
	CSR_READ_4(sc, LGE_ISR);

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * If 'm' is NULL a fresh mbuf header and jumbo buffer are allocated;
 * otherwise the caller's mbuf is recycled into the descriptor.
 */
static int
lge_newbuf(struct lge_softc *sc, struct lge_rx_desc *c, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct lge_jslot *buf;

	if (m == NULL) {
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			kprintf("lge%d: no memory for rx list "
				"-- packet dropped!\n", sc->lge_unit);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = lge_jalloc(sc);
		if (buf == NULL) {
#ifdef LGE_VERBOSE
			kprintf("lge%d: jumbo allocation failed "
			       "-- packet dropped!\n", sc->lge_unit);
#endif
			m_freem(m_new);
			return(ENOBUFS);
		}
		/* Attach the buffer to the mbuf as external storage. */
		m_new->m_ext.ext_arg = buf;
		m_new->m_ext.ext_buf = buf->lge_buf;
		m_new->m_ext.ext_free = lge_jfree;
		m_new->m_ext.ext_ref = lge_jref;
		m_new->m_ext.ext_size = LGE_JUMBO_FRAMELEN;

		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_flags |= M_EXT;
		m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
	} else {
		/* Recycle: reset lengths and data pointer to the full buffer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = LGE_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return(0);
}

/*
 * Allocate one contiguous chunk of DMA-able memory for all the
 * jumbo receive buffers and carve it into LGE_JSLOTS slots.
 */
static int
lge_alloc_jumbo_mem(struct lge_softc *sc)
{
	struct lge_jslot *entry;
	caddr_t ptr;
	int i;

	/* Grab a big chunk o' storage. */
	sc->lge_cdata.lge_jumbo_buf = contigmalloc(LGE_JMEM, M_DEVBUF,
	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->lge_cdata.lge_jumbo_buf == NULL) {
		kprintf("lge%d: no memory for jumbo buffers!\n", sc->lge_unit);
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->lge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->lge_cdata.lge_jumbo_buf;
	for (i = 0; i < LGE_JSLOTS; i++) {
		entry = &sc->lge_cdata.lge_jslots[i];
		entry->lge_sc = sc;
		entry->lge_buf = ptr;
		entry->lge_inuse = 0;
		entry->lge_slot = i;
		SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jslot_link);
		ptr += LGE_JLEN;
	}

	return(0);
}

/*
 * Release the jumbo buffer pool (safe to call if it was never allocated).
 */
static void
lge_free_jumbo_mem(struct lge_softc *sc)
{
	if (sc->lge_cdata.lge_jumbo_buf)
		contigfree(sc->lge_cdata.lge_jumbo_buf, LGE_JMEM, M_DEVBUF);
}

/*
 * Allocate a jumbo buffer slot from the free list.
 * Returns NULL if the pool is exhausted.
 */
static struct lge_jslot *
lge_jalloc(struct lge_softc *sc)
{
	struct lge_jslot *entry;

	/* The free list is shared with lge_jfree(); serialize access. */
	lwkt_serialize_enter(&sc->lge_jslot_serializer);
	entry = SLIST_FIRST(&sc->lge_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jslot_link);
		entry->lge_inuse = 1;
	} else {
#ifdef LGE_VERBOSE
		kprintf("lge%d: no free jumbo buffers\n", sc->lge_unit);
#endif
	}
	lwkt_serialize_exit(&sc->lge_jslot_serializer);
	return(entry);
}

/*
 * Adjust usage count on a jumbo buffer. In general this doesn't
 * get used much because our jumbo buffers don't get passed around
 * a lot, but it's implemented for correctness.
 */
static void
lge_jref(void *arg)
{
	struct lge_jslot *entry = (struct lge_jslot *)arg;
	struct lge_softc *sc = entry->lge_sc;

	/* Sanity check: the slot must be one of ours and in use. */
	if (&sc->lge_cdata.lge_jslots[entry->lge_slot] != entry)
		panic("lge_jref: asked to reference buffer "
		    "that we don't manage!");
	else if (entry->lge_inuse == 0)
		panic("lge_jref: buffer already free!");
	else
		atomic_add_int(&entry->lge_inuse, 1);
}

/*
 * Release a jumbo buffer.
 */
static void
lge_jfree(void *arg)
{
	struct lge_jslot *entry = (struct lge_jslot *)arg;
	struct lge_softc *sc = entry->lge_sc;

	if (sc == NULL)
		panic("lge_jfree: can't find softc pointer!");

	/* Sanity check: the slot must be one of ours and in use. */
	if (&sc->lge_cdata.lge_jslots[entry->lge_slot] != entry) {
		panic("lge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->lge_inuse == 0) {
		panic("lge_jfree: buffer already free!");
	} else {
		/* Drop a reference; return to the free list on last release. */
		lwkt_serialize_enter(&sc->lge_jslot_serializer);
		atomic_subtract_int(&entry->lge_inuse, 1);
		if (entry->lge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->lge_jfree_listhead,
					  entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->lge_jslot_serializer);
	}
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.  'cnt' is the number of frames the
 * chip reports as DMA-complete.
 */
static void
lge_rxeof(struct lge_softc *sc, int cnt)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	struct lge_rx_desc *cur_rx;
	int c, i, total_len = 0;
	uint32_t rxsts, rxctl;


	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf *m0 = NULL;

		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			ifp->if_ierrors++;
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		/*
		 * No spare buffer available: copy the frame out of the
		 * DMA buffer so the original can be recycled in place.
		 */
		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				kprintf("lge%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc->lge_unit);
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

		/* Do IP checksum checking. */
		if (rxsts & LGE_RXSTS_ISIP)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(rxsts & LGE_RXSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rxsts & LGE_RXSTS_ISTCP &&
		    !(rxsts & LGE_RXSTS_TCPCSUMERR)) ||
		    (rxsts & LGE_RXSTS_ISUDP &&
		    !(rxsts & LGE_RXSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
			    CSUM_FRAG_NOT_CHECKED;
			m->m_pkthdr.csum_data = 0xffff;
		}

		ifp->if_input(ifp, m);
	}

	sc->lge_cdata.lge_rx_cons = i;
}

/*
 * RX command FIFO ran dry: restart the interface to repopulate it.
 */
static void
lge_rxeoc(struct lge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Clear IFF_RUNNING so lge_init() doesn't short-circuit. */
	ifp->if_flags &= ~IFF_RUNNING;
	lge_init(sc);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
lge_txeof(struct lge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct lge_tx_desc *cur_tx = NULL;
	uint32_t idx, txdone;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->lge_cdata.lge_tx_cons;
	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
		cur_tx = &sc->lge_ldata->lge_tx_list[idx];

		ifp->if_opackets++;
		if (cur_tx->lge_mbuf != NULL) {
			m_freem(cur_tx->lge_mbuf);
			cur_tx->lge_mbuf = NULL;
		}
		cur_tx->lge_ctl = 0;

		txdone--;
		LGE_INC(idx, LGE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->lge_cdata.lge_tx_cons = idx;

	/* If we reclaimed anything, the ring has room again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * Periodic timer callout: grab the serializer and run the real work.
 */
static void
lge_tick(void *xsc)
{
	struct lge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	lge_tick_serialized(xsc);
	lwkt_serialize_exit(ifp->if_serializer);
}

/*
 * Per-second housekeeping: harvest collision statistics and poll
 * for link establishment.  Must be called with the interface
 * serializer held.
 */
static void
lge_tick_serialized(void *xsc)
{
	struct lge_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Reading STATSVAL after selecting an index fetches that counter. */
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);

	if (!sc->lge_link) {
		mii = device_get_softc(sc->lge_miibus);
		mii_tick(mii);
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->lge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
				kprintf("lge%d: gigabit link up\n",
				    sc->lge_unit);
			/* Link came up: kick out any queued packets. */
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}

	callout_reset(&sc->lge_stat_timer, hz, lge_tick, sc);
}

/*
 * Interrupt handler.  Runs with the interface serializer held
 * (registered via bus_setup_intr with ifp->if_serializer).
 */
static void
lge_intr(void *arg)
{
	struct lge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	/* Suppress unwanted interrupts */
	if ((ifp->if_flags & IFF_UP) == 0) {
		lge_stop(sc);
		return;
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_rxeoc(sc);

		if (status & LGE_ISR_PHY_INTR) {
			/* Link change: force a fresh poll via the tick code. */
			sc->lge_link = 0;
			callout_stop(&sc->lge_stat_timer);
			lge_tick_serialized(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
lge_encap(struct lge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
	struct lge_frag *f = NULL;
	struct lge_tx_desc *cur_tx;
	struct mbuf *m;
	int frag = 0, tot_len = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
	frag = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == LGE_FRAG_CNT)
				break;

			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}
	/* Caller should make sure that 'm_head' is not excessive fragmented */
	KASSERT(m == NULL, ("too many fragments\n"));

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx));

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
1121 */ 1122 1123 static void 1124 lge_start(struct ifnet *ifp) 1125 { 1126 struct lge_softc *sc = ifp->if_softc; 1127 struct mbuf *m_head = NULL, *m_defragged; 1128 uint32_t idx; 1129 int need_timer; 1130 1131 if (!sc->lge_link) { 1132 ifq_purge(&ifp->if_snd); 1133 return; 1134 } 1135 1136 idx = sc->lge_cdata.lge_tx_prod; 1137 1138 if (ifp->if_flags & IFF_OACTIVE) 1139 return; 1140 1141 need_timer = 0; 1142 while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) { 1143 struct mbuf *m; 1144 int frags; 1145 1146 if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0) { 1147 ifp->if_flags |= IFF_OACTIVE; 1148 break; 1149 } 1150 1151 m_defragged = NULL; 1152 m_head = ifq_dequeue(&ifp->if_snd, NULL); 1153 if (m_head == NULL) 1154 break; 1155 1156 again: 1157 frags = 0; 1158 for (m = m_head; m != NULL; m = m->m_next) 1159 ++frags; 1160 if (frags > LGE_FRAG_CNT) { 1161 if (m_defragged != NULL) { 1162 /* 1163 * Even after defragmentation, there 1164 * are still too many fragments, so 1165 * drop this packet. 1166 */ 1167 m_freem(m_head); 1168 continue; 1169 } 1170 1171 m_defragged = m_defrag(m_head, MB_DONTWAIT); 1172 if (m_defragged == NULL) { 1173 m_freem(m_head); 1174 continue; 1175 } 1176 m_head = m_defragged; 1177 1178 /* Recount # of fragments */ 1179 goto again; 1180 } 1181 1182 lge_encap(sc, m_head, &idx); 1183 need_timer = 1; 1184 1185 BPF_MTAP(ifp, m_head); 1186 } 1187 1188 if (!need_timer) 1189 return; 1190 1191 sc->lge_cdata.lge_tx_prod = idx; 1192 1193 /* 1194 * Set a timeout in case the chip goes out to lunch. 1195 */ 1196 ifp->if_timer = 5; 1197 } 1198 1199 static void 1200 lge_init(void *xsc) 1201 { 1202 struct lge_softc *sc = xsc; 1203 struct ifnet *ifp = &sc->arpcom.ac_if; 1204 struct mii_data *mii; 1205 1206 if (ifp->if_flags & IFF_RUNNING) 1207 return; 1208 1209 /* 1210 * Cancel pending I/O and free all RX/TX buffers. 
1211 */ 1212 lge_stop(sc); 1213 lge_reset(sc); 1214 1215 mii = device_get_softc(sc->lge_miibus); 1216 1217 /* Set MAC address */ 1218 CSR_WRITE_4(sc, LGE_PAR0, *(uint32_t *)(&sc->arpcom.ac_enaddr[0])); 1219 CSR_WRITE_4(sc, LGE_PAR1, *(uint32_t *)(&sc->arpcom.ac_enaddr[4])); 1220 1221 /* Init circular RX list. */ 1222 if (lge_list_rx_init(sc) == ENOBUFS) { 1223 kprintf("lge%d: initialization failed: no " 1224 "memory for rx buffers\n", sc->lge_unit); 1225 lge_stop(sc); 1226 return; 1227 } 1228 1229 /* 1230 * Init tx descriptors. 1231 */ 1232 lge_list_tx_init(sc); 1233 1234 /* Set initial value for MODE1 register. */ 1235 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST | 1236 LGE_MODE1_TX_CRC | LGE_MODE1_TXPAD | 1237 LGE_MODE1_RX_FLOWCTL | LGE_MODE1_SETRST_CTL0 | 1238 LGE_MODE1_SETRST_CTL1 | LGE_MODE1_SETRST_CTL2); 1239 1240 /* If we want promiscuous mode, set the allframes bit. */ 1241 if (ifp->if_flags & IFF_PROMISC) { 1242 CSR_WRITE_4(sc, LGE_MODE1, 1243 LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_PROMISC); 1244 } else { 1245 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC); 1246 } 1247 1248 /* 1249 * Set the capture broadcast bit to capture broadcast frames. 1250 */ 1251 if (ifp->if_flags & IFF_BROADCAST) { 1252 CSR_WRITE_4(sc, LGE_MODE1, 1253 LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_BCAST); 1254 } else { 1255 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST); 1256 } 1257 1258 /* Packet padding workaround? 
*/ 1259 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD); 1260 1261 /* No error frames */ 1262 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS); 1263 1264 /* Receive large frames */ 1265 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_GIANTS); 1266 1267 /* Workaround: disable RX/TX flow control */ 1268 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL); 1269 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL); 1270 1271 /* Make sure to strip CRC from received frames */ 1272 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC); 1273 1274 /* Turn off magic packet mode */ 1275 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB); 1276 1277 /* Turn off all VLAN stuff */ 1278 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX | LGE_MODE1_VLAN_TX | 1279 LGE_MODE1_VLAN_STRIP | LGE_MODE1_VLAN_INSERT); 1280 1281 /* Workarond: FIFO overflow */ 1282 CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF); 1283 CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT); 1284 1285 /* 1286 * Load the multicast filter. 1287 */ 1288 lge_setmulti(sc); 1289 1290 /* 1291 * Enable hardware checksum validation for all received IPv4 1292 * packets, do not reject packets with bad checksums. 1293 */ 1294 CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM | 1295 LGE_MODE2_RX_TCPCSUM | LGE_MODE2_RX_UDPCSUM | 1296 LGE_MODE2_RX_ERRCSUM); 1297 1298 /* 1299 * Enable the delivery of PHY interrupts based on 1300 * link/speed/duplex status chalges. 1301 */ 1302 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0 | LGE_MODE1_GMIIPOLL); 1303 1304 /* Enable receiver and transmitter. */ 1305 CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0); 1306 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_ENB); 1307 1308 CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0); 1309 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1 | LGE_MODE1_TX_ENB); 1310 1311 /* 1312 * Enable interrupts. 
1313 */ 1314 CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0 | 1315 LGE_IMR_SETRST_CTL1 | LGE_IMR_INTR_ENB|LGE_INTRS); 1316 1317 lge_ifmedia_upd(ifp); 1318 1319 ifp->if_flags |= IFF_RUNNING; 1320 ifp->if_flags &= ~IFF_OACTIVE; 1321 1322 callout_reset(&sc->lge_stat_timer, hz, lge_tick, sc); 1323 } 1324 1325 /* 1326 * Set media options. 1327 */ 1328 static int 1329 lge_ifmedia_upd(struct ifnet *ifp) 1330 { 1331 struct lge_softc *sc = ifp->if_softc; 1332 struct mii_data *mii = device_get_softc(sc->lge_miibus); 1333 1334 sc->lge_link = 0; 1335 if (mii->mii_instance) { 1336 struct mii_softc *miisc; 1337 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1338 mii_phy_reset(miisc); 1339 } 1340 mii_mediachg(mii); 1341 1342 return(0); 1343 } 1344 1345 /* 1346 * Report current media status. 1347 */ 1348 static void 1349 lge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1350 { 1351 struct lge_softc *sc = ifp->if_softc; 1352 struct mii_data *mii; 1353 1354 mii = device_get_softc(sc->lge_miibus); 1355 mii_pollstat(mii); 1356 ifmr->ifm_active = mii->mii_media_active; 1357 ifmr->ifm_status = mii->mii_media_status; 1358 } 1359 1360 static int 1361 lge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1362 { 1363 struct lge_softc *sc = ifp->if_softc; 1364 struct ifreq *ifr = (struct ifreq *) data; 1365 struct mii_data *mii; 1366 int error = 0; 1367 1368 switch(command) { 1369 case SIOCSIFMTU: 1370 if (ifr->ifr_mtu > LGE_JUMBO_MTU) 1371 error = EINVAL; 1372 else 1373 ifp->if_mtu = ifr->ifr_mtu; 1374 break; 1375 case SIOCSIFFLAGS: 1376 if (ifp->if_flags & IFF_UP) { 1377 if (ifp->if_flags & IFF_RUNNING && 1378 ifp->if_flags & IFF_PROMISC && 1379 !(sc->lge_if_flags & IFF_PROMISC)) { 1380 CSR_WRITE_4(sc, LGE_MODE1, 1381 LGE_MODE1_SETRST_CTL1| 1382 LGE_MODE1_RX_PROMISC); 1383 } else if (ifp->if_flags & IFF_RUNNING && 1384 !(ifp->if_flags & IFF_PROMISC) && 1385 sc->lge_if_flags & IFF_PROMISC) { 1386 CSR_WRITE_4(sc, LGE_MODE1, 1387 LGE_MODE1_RX_PROMISC); 1388 } 
else { 1389 ifp->if_flags &= ~IFF_RUNNING; 1390 lge_init(sc); 1391 } 1392 } else { 1393 if (ifp->if_flags & IFF_RUNNING) 1394 lge_stop(sc); 1395 } 1396 sc->lge_if_flags = ifp->if_flags; 1397 error = 0; 1398 break; 1399 case SIOCADDMULTI: 1400 case SIOCDELMULTI: 1401 lge_setmulti(sc); 1402 error = 0; 1403 break; 1404 case SIOCGIFMEDIA: 1405 case SIOCSIFMEDIA: 1406 mii = device_get_softc(sc->lge_miibus); 1407 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1408 break; 1409 default: 1410 error = ether_ioctl(ifp, command, data); 1411 break; 1412 } 1413 1414 return(error); 1415 } 1416 1417 static void 1418 lge_watchdog(struct ifnet *ifp) 1419 { 1420 struct lge_softc *sc = ifp->if_softc; 1421 1422 ifp->if_oerrors++; 1423 kprintf("lge%d: watchdog timeout\n", sc->lge_unit); 1424 1425 lge_stop(sc); 1426 lge_reset(sc); 1427 ifp->if_flags &= ~IFF_RUNNING; 1428 lge_init(sc); 1429 1430 if (!ifq_is_empty(&ifp->if_snd)) 1431 if_devstart(ifp); 1432 } 1433 1434 /* 1435 * Stop the adapter and free any mbufs allocated to the 1436 * RX and TX lists. 1437 */ 1438 static void 1439 lge_stop(struct lge_softc *sc) 1440 { 1441 struct ifnet *ifp = &sc->arpcom.ac_if; 1442 int i; 1443 1444 ifp->if_timer = 0; 1445 callout_stop(&sc->lge_stat_timer); 1446 CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB); 1447 1448 /* Disable receiver and transmitter. */ 1449 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB); 1450 sc->lge_link = 0; 1451 1452 /* 1453 * Free data in the RX lists. 1454 */ 1455 for (i = 0; i < LGE_RX_LIST_CNT; i++) { 1456 if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) { 1457 m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf); 1458 sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL; 1459 } 1460 } 1461 bzero(&sc->lge_ldata->lge_rx_list, sizeof(sc->lge_ldata->lge_rx_list)); 1462 1463 /* 1464 * Free the TX list buffers. 
1465 */ 1466 for (i = 0; i < LGE_TX_LIST_CNT; i++) { 1467 if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) { 1468 m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf); 1469 sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL; 1470 } 1471 } 1472 1473 bzero(&sc->lge_ldata->lge_tx_list, sizeof(sc->lge_ldata->lge_tx_list)); 1474 1475 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1476 } 1477 1478 /* 1479 * Stop all chip I/O so that the kernel's probe routines don't 1480 * get confused by errant DMAs when rebooting. 1481 */ 1482 static void 1483 lge_shutdown(device_t dev) 1484 { 1485 struct lge_softc *sc = device_get_softc(dev); 1486 1487 lge_reset(sc); 1488 lge_stop(sc); 1489 } 1490