1 /*- 2 * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: src/sys/dev/tx/if_tx.c,v 1.61.2.1 2002/10/29 01:43:49 semenu Exp $ 27 * $DragonFly: src/sys/dev/netif/tx/if_tx.c,v 1.14 2004/09/15 01:00:26 joerg Exp $ 28 */ 29 30 /* 31 * EtherPower II 10/100 Fast Ethernet (SMC 9432 serie) 32 * 33 * These cards are based on SMC83c17x (EPIC) chip and one of the various 34 * PHYs (QS6612, AC101 and LXT970 were seen). The media support depends on 35 * card model. All cards support 10baseT/UTP and 100baseTX half- and full- 36 * duplex (SMB9432TX). SMC9432BTX also supports 10baseT/BNC. SMC9432FTX also 37 * supports fibre optics. 
38 * 39 * Thanks are going to Steve Bauer and Jason Wright. 40 */ 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/sockio.h> 45 #include <sys/mbuf.h> 46 #include <sys/malloc.h> 47 #include <sys/kernel.h> 48 #include <sys/socket.h> 49 #include <sys/queue.h> 50 51 #include <net/if.h> 52 #include <net/if_arp.h> 53 #include <net/ethernet.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 57 #include <net/bpf.h> 58 59 #include <net/vlan/if_vlan_var.h> 60 61 #include <vm/vm.h> /* for vtophys */ 62 #include <vm/pmap.h> /* for vtophys */ 63 #include <machine/bus_memio.h> 64 #include <machine/bus_pio.h> 65 #include <machine/bus.h> 66 #include <machine/resource.h> 67 #include <machine/clock.h> /* for DELAY */ 68 #include <sys/bus.h> 69 #include <sys/rman.h> 70 71 #include <bus/pci/pcireg.h> 72 #include <bus/pci/pcivar.h> 73 74 #include "../mii_layer/mii.h" 75 #include "../mii_layer/miivar.h" 76 #include "../mii_layer/miidevs.h" 77 #include "../mii_layer/lxtphyreg.h" 78 79 #include "miibus_if.h" 80 81 #include "if_txreg.h" 82 #include "if_txvar.h" 83 84 static int epic_ifioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 85 static void epic_intr(void *); 86 static void epic_tx_underrun(epic_softc_t *); 87 static int epic_common_attach(epic_softc_t *); 88 static void epic_ifstart(struct ifnet *); 89 static void epic_ifwatchdog(struct ifnet *); 90 static void epic_stats_update(void *); 91 static int epic_init(epic_softc_t *); 92 static void epic_stop(epic_softc_t *); 93 static void epic_rx_done(epic_softc_t *); 94 static void epic_tx_done(epic_softc_t *); 95 static int epic_init_rings(epic_softc_t *); 96 static void epic_free_rings(epic_softc_t *); 97 static void epic_stop_activity(epic_softc_t *); 98 static int epic_queue_last_packet(epic_softc_t *); 99 static void epic_start_activity(epic_softc_t *); 100 static void epic_set_rx_mode(epic_softc_t *); 101 static void epic_set_tx_mode(epic_softc_t *); 102 static void 
epic_set_mc_table(epic_softc_t *);
static u_int8_t epic_calchash(caddr_t);

/* EEPROM access primitives (bit-banged serial EEPROM interface). */
static int epic_read_eeprom(epic_softc_t *,u_int16_t);
static void epic_output_eepromw(epic_softc_t *, u_int16_t);
static u_int16_t epic_input_eepromw(epic_softc_t *);
static u_int8_t epic_eeprom_clock(epic_softc_t *,u_int8_t);
static void epic_write_eepromreg(epic_softc_t *,u_int8_t);
static u_int8_t epic_read_eepromreg(epic_softc_t *);

/* PHY register access through the EPIC's MII management interface. */
static int epic_read_phy_reg(epic_softc_t *, int, int);
static void epic_write_phy_reg(epic_softc_t *, int, int, int);

/* miibus glue. */
static int epic_miibus_readreg(device_t, int, int);
static int epic_miibus_writereg(device_t, int, int, int);
static void epic_miibus_statchg(device_t);
static void epic_miibus_mediainit(device_t);

/* ifmedia handlers. */
static int epic_ifmedia_upd(struct ifnet *);
static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* newbus device interface. */
static int epic_probe(device_t);
static int epic_attach(device_t);
static void epic_shutdown(device_t);
static int epic_detach(device_t);
static struct epic_type *epic_devtype(device_t);

static device_method_t epic_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		epic_probe),
	DEVMETHOD(device_attach,	epic_attach),
	DEVMETHOD(device_detach,	epic_detach),
	DEVMETHOD(device_shutdown,	epic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	epic_miibus_readreg),
	DEVMETHOD(miibus_writereg,	epic_miibus_writereg),
	DEVMETHOD(miibus_statchg,	epic_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	epic_miibus_mediainit),

	{ 0, 0 }
};

static driver_t epic_driver = {
	"tx",			/* interface name prefix ("tx0", ...) */
	epic_methods,
	sizeof(epic_softc_t)	/* softc size newbus allocates per device */
};

static devclass_t epic_devclass;

DECLARE_DUMMY_MODULE(if_tx);
MODULE_DEPEND(if_tx, miibus, 1, 1, 1);
DRIVER_MODULE(if_tx, pci, epic_driver, epic_devclass, 0, 0);
DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);

/* Table of supported PCI vendor/device id pairs. */
static struct epic_type epic_devs[] = {
	{ SMC_VENDORID, SMC_DEVICEID_83C170,
		"SMC EtherPower II 10/100" },
	{ 0, 0, NULL }
};

/*
 * Probe: match the PCI device against epic_devs[] and set the
 * device description on success.  Returns 0 on match, ENXIO otherwise.
 */
static int
epic_probe(dev)
	device_t dev;
{
	struct epic_type *t;

	t = epic_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->name);
		return(0);
	}

	return(ENXIO);
}

/*
 * Walk epic_devs[] looking for an entry whose vendor/device ids match
 * this PCI device.  Returns the entry, or NULL if unsupported.
 */
static struct epic_type *
epic_devtype(dev)
	device_t dev;
{
	struct epic_type *t;

	t = epic_devs;

	while(t->name != NULL) {
		if ((pci_get_vendor(dev) == t->ven_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			return(t);
		}
		t++;
	}
	return (NULL);
}

/* Register access window: I/O port or memory mapped, chosen at build time. */
#if defined(EPIC_USEIOSPACE)
#define EPIC_RES	SYS_RES_IOPORT
#define EPIC_RID	PCIR_BASEIO
#else
#define EPIC_RES	SYS_RES_MEMORY
#define EPIC_RID	PCIR_BASEMEM
#endif

/*
 * Attach routine: map registers, allocate softc, rings and descriptors.
 * Reset to known state.
 *
 * Runs at splimp(); on any failure the resources acquired so far are
 * released before jumping to "fail".
 */
static int
epic_attach(dev)
	device_t dev;
{
	struct ifnet *ifp;
	epic_softc_t *sc;
	u_int32_t command;
	int unit, error;
	int i, s, rid, tmp;

	s = splimp();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/* Preinitialize softc structure */
	bzero(sc, sizeof(epic_softc_t));
	sc->unit = unit;
	sc->dev = dev;
	callout_init(&sc->tx_stat_timer);

	/* Fill ifnet structure */
	ifp = &sc->sc_if;
	if_initname(ifp, "tx", unit);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST;
	ifp->if_ioctl = epic_ifioctl;
	ifp->if_start = epic_ifstart;
	ifp->if_watchdog = epic_ifwatchdog;
	ifp->if_init = (if_init_f_t*)epic_init;
	ifp->if_timer = 0;
	ifp->if_baudrate = 10000000;
	/* Leave one ring slot free so the queue never wraps onto itself. */
	ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1;

	/* Enable ports, memory and busmastering */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	/* Re-read to verify that the enables actually stuck. */
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#if defined(EPIC_USEIOSPACE)
	if ((command & PCIM_CMD_PORTEN) == 0) {
		device_printf(dev, "failed to enable I/O mapping!\n");
		error = ENXIO;
		goto fail;
	}
#else
	if ((command & PCIM_CMD_MEMEN) == 0) {
		device_printf(dev, "failed to enable memory mapping!\n");
		error = ENXIO;
		goto fail;
	}
#endif

	/* Map the register window (ports or memory, see EPIC_RES). */
	rid = EPIC_RID;
	sc->res = bus_alloc_resource(dev, EPIC_RES, &rid, 0, ~0, 1,
	    RF_ACTIVE);

	if (sc->res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->sc_st = rman_get_bustag(sc->res);
	sc->sc_sh = rman_get_bushandle(sc->res);

	/* Allocate interrupt */
	rid = 0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
	    epic_intr, sc, &sc->sc_ih);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	/* Do OS independent part, including chip wakeup and reset */
	error = epic_common_attach(sc);
	if (error) {
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
		error = ENXIO;
		goto fail;
	}

	/* Do ifmedia setup */
	if (mii_phy_probe(dev, &sc->miibus,
	    epic_ifmedia_upd, epic_ifmedia_sts)) {
		device_printf(dev, "ERROR! MII without any PHY!?\n");
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Print the board type string stored in EEPROM words 0x2c-0x31
	 * (two ASCII characters per word, terminated by a space).
	 */
	printf(" type ");
	for(i=0x2c;i<0x32;i++) {
		tmp = epic_read_eeprom(sc, i);
		if (' ' == (u_int8_t)tmp) break;
		printf("%c", (u_int8_t)tmp);
		tmp >>= 8;
		if (' ' == (u_int8_t)tmp) break;
		printf("%c", (u_int8_t)tmp);
	}
	printf("\n");

	/* Attach to OS's managers */
	ether_ifattach(ifp, sc->sc_macaddr);
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

fail:
	splx(s);

	return(error);
}

/*
 * Detach driver and free resources
 */
static int
epic_detach(dev)
	device_t dev;
{
	struct ifnet *ifp;
	epic_softc_t *sc;
	int s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	ether_ifdetach(ifp);

	epic_stop(sc);

	/* Tear down the miibus child before releasing our own resources. */
	bus_generic_detach(dev);
	device_delete_child(dev, sc->miibus);

	bus_teardown_intr(dev, sc->irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);

	free(sc->tx_flist, M_DEVBUF);
	free(sc->tx_desc, M_DEVBUF);
	free(sc->rx_desc, M_DEVBUF);

	splx(s);

	return(0);
}

#undef EPIC_RES
#undef EPIC_RID

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
epic_shutdown(dev)
	device_t dev;
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	epic_stop(sc);

	return;
}

/*
 * This is the if_ioctl handler: address/MTU/flags/multicast/media
 * requests.  Runs at splimp().
 */
static int
epic_ifioctl(ifp, command, data, cr)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
	struct ucred *cr;
{
	epic_softc_t *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *) data;
	int x, error = 0;

	x = splimp();

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		/* XXX Though the datasheet doesn't imply any
		 * limitations on RX and TX sizes beside max 64Kb
		 * DMA transfer, seems we can't send more than 1600
		 * data bytes per ethernet packet. (Transmitter hangs
		 * up if more data is sent)
		 */
		if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Restart so rings are rebuilt for the new MTU. */
			epic_stop(sc);
			epic_init(sc);
		} else
			error = EINVAL;
		break;

	case SIOCSIFFLAGS:
		/*
		 * If the interface is marked up and stopped, then start it.
		 * If it is marked down and running, then stop it.
		 */
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				epic_init(sc);
				break;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				epic_stop(sc);
				break;
			}
		}

		/* Handle IFF_PROMISC and IFF_ALLMULTI flags */
		/* (chip must be idle while RXCON/MC tables are written) */
		epic_stop_activity(sc);
		epic_set_mc_table(sc);
		epic_set_rx_mode(sc);
		epic_start_activity(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		epic_set_mc_table(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = EINVAL;
	}
	splx(x);

	return error;
}

/*
 * OS-independent part of attach process: allocate memory for descriptors
 * and frag lists, wake up chip, read MAC address and PHY identifier.
 * Return -1 on failure.
 */
static int
epic_common_attach(sc)
	epic_softc_t *sc;
{
	int i;

	/* Allocate TX frag lists and the TX/RX descriptor rings. */
	sc->tx_flist = malloc(sizeof(struct epic_frag_list)*TX_RING_SIZE,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->tx_desc = malloc(sizeof(struct epic_tx_desc)*TX_RING_SIZE,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->rx_desc = malloc(sizeof(struct epic_rx_desc)*RX_RING_SIZE,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/* Bring the chip out of low-power mode. */
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
	DELAY(500);

	/* Workaround for Application Note 7-15 */
	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

	/* Read mac address from EEPROM (one 16-bit word at a time). */
	for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
		((u_int16_t *)sc->sc_macaddr)[i] = epic_read_eeprom(sc,i);

	/* Set Non-Volatile Control Register from EEPROM */
	CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);

	/* Set defaults */
	sc->tx_threshold = TRANSMIT_THRESHOLD;
	sc->txcon = TXCON_DEFAULT;
	sc->miicfg = MIICFG_SMI_ENABLE;
	sc->phyid = EPIC_UNKN_PHY;
	sc->serinst = -1;	/* no serial (10base2/BNC) PHY found yet */

	/* Fetch card id from the PCI subsystem registers. */
	sc->cardvend = pci_read_config(sc->dev, PCIR_SUBVEND_0, 2);
	sc->cardid = pci_read_config(sc->dev, PCIR_SUBDEV_0, 2);

	if (sc->cardvend != SMC_VENDORID)
		device_printf(sc->dev, "unknown card vendor %04xh\n", sc->cardvend);

	return 0;
}

/*
 * This is the if_start handler. It takes mbufs from the if_snd queue
 * and queues them for transmit, one by one, until the TX ring becomes
 * full or the queue becomes empty.
 */
static void
epic_ifstart(ifp)
	struct ifnet * ifp;
{
	epic_softc_t *sc = ifp->if_softc;
	struct epic_tx_buffer *buf;
	struct epic_tx_desc *desc;
	struct epic_frag_list *flist;
	struct mbuf *m0;
	struct mbuf *m;
	int i;

	while (sc->pending_txs < TX_RING_SIZE) {
		buf = sc->tx_buffer + sc->cur_tx;
		desc = sc->tx_desc + sc->cur_tx;
		flist = sc->tx_flist + sc->cur_tx;

		/* Get next packet to send */
		IF_DEQUEUE(&ifp->if_snd, m0);

		/* If nothing to send, return */
		if (NULL == m0) return;

		/* Fill fragments list */
		for (m = m0, i = 0;
		    (NULL != m) && (i < EPIC_MAX_FRAGS);
		    m = m->m_next, i++) {
			flist->frag[i].fraglen = m->m_len;
			flist->frag[i].fragaddr = vtophys(mtod(m, caddr_t));
		}
		flist->numfrags = i;

		/* If packet was more than EPIC_MAX_FRAGS parts, */
		/* recopy packet to a newly allocated mbuf cluster */
		if (NULL != m) {
			EPIC_MGETCLUSTER(m);
			if (NULL == m) {
				/* no cluster available: drop the packet */
				m_freem(m0);
				ifp->if_oerrors++;
				continue;
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			flist->frag[0].fraglen =
			    m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m->m_pkthdr.rcvif = ifp;

			flist->numfrags = 1;
			flist->frag[0].fragaddr = vtophys(mtod(m, caddr_t));
			m_freem(m0);
			m0 = m;
		}

		buf->mbuf = m0;
		sc->pending_txs++;
		sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
		desc->control = 0x01;
		/* Pad short frames up to the Ethernet minimum (sans CRC). */
		desc->txlength =
		    max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
		/* Hand the descriptor to the chip (ownership bit) last. */
		desc->status = 0x8000;
		CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);

		/* Set watchdog timer */
		ifp->if_timer = 8;

		if (ifp->if_bpf)
			bpf_mtap(ifp, m0);
	}

	/* Ring is full: tell the stack to stop feeding us for now. */
	ifp->if_flags |= IFF_OACTIVE;

	return;

}

/*
 * Synopsis: Finish all received frames.
 */
static void
epic_rx_done(sc)
	epic_softc_t *sc;
{
	u_int16_t len;
	struct ifnet *ifp = &sc->sc_if;
	struct epic_rx_buffer *buf;
	struct epic_rx_desc *desc;
	struct mbuf *m;

	/* Bit 0x8000 is the chip-ownership bit; loop over host-owned slots. */
	while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
		buf = sc->rx_buffer + sc->cur_rx;
		desc = sc->rx_desc + sc->cur_rx;

		/* Switch to next descriptor */
		sc->cur_rx = (sc->cur_rx+1) & RX_RING_MASK;

		/*
		 * Check for RX errors. This should only happen if
		 * SAVE_ERRORED_PACKETS is set. RX errors generate
		 * RXE interrupt usually.
		 */
		if ((desc->status & 1) == 0) {
			sc->sc_if.if_ierrors++;
			desc->status = 0x8000;	/* recycle the descriptor */
			continue;
		}

		/* Save packet length and mbuf contained packet */
		len = desc->rxlength - ETHER_CRC_LEN;
		m = buf->mbuf;

		/* Try to get a replacement mbuf cluster */
		EPIC_MGETCLUSTER(buf->mbuf);
		if (NULL == buf->mbuf) {
			/* failed: keep the old mbuf, drop this frame */
			buf->mbuf = m;
			desc->status = 0x8000;
			ifp->if_ierrors++;
			continue;
		}

		/* Point to new mbuf, and give descriptor to chip */
		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
		desc->status = 0x8000;

		/* First mbuf in packet holds the ethernet and packet headers */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/* Give mbuf to OS */
		(*ifp->if_input)(ifp, m);

		/* Successfully received frame */
		ifp->if_ipackets++;
	}

	return;
}

/*
 * Synopsis: Do last phase of transmission. I.e. if desc is
 * transmitted, decrease pending_txs counter, free mbuf contained
 * packet, switch to next descriptor and repeat until no packets
 * are pending or descriptor is not transmitted yet.
 */
static void
epic_tx_done(sc)
	epic_softc_t *sc;
{
	struct epic_tx_buffer *buf;
	struct epic_tx_desc *desc;
	u_int16_t status;

	while (sc->pending_txs > 0) {
		buf = sc->tx_buffer + sc->dirty_tx;
		desc = sc->tx_desc + sc->dirty_tx;
		status = desc->status;

		/* If this packet is not transmitted, the following */
		/* packets are not transmitted either (in-order ring) */
		if (status & 0x8000) break;

		/* Packet is transmitted. Switch to next and */
		/* free mbuf */
		sc->pending_txs--;
		sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
		m_freem(buf->mbuf);
		buf->mbuf = NULL;

		/* Check for errors and collisions */
		if (status & 0x0001) sc->sc_if.if_opackets++;
		else sc->sc_if.if_oerrors++;
		/* collision count is in status bits 8-12 */
		sc->sc_if.if_collisions += (status >> 8) & 0x1F;
#if defined(EPIC_DIAG)
		if ((status & 0x1001) == 0x1001)
			device_printf(sc->dev, "Tx ERROR: excessive coll. number\n");
#endif
	}

	/* Ring has free slots again: allow the stack to queue more. */
	if (sc->pending_txs < TX_RING_SIZE)
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
}

/*
 * Interrupt function.  Loops (at most 4 times) while the chip reports
 * an active interrupt, acknowledging and dispatching each cause.
 */
static void
epic_intr(arg)
	void *arg;
{
	epic_softc_t * sc = (epic_softc_t *) arg;
	int status, i = 4;

	while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
		/* Acknowledge the causes we are about to handle. */
		CSR_WRITE_4(sc, INTSTAT, status);

		if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) {
			epic_rx_done(sc);
			if (status & (INTSTAT_RQE|INTSTAT_OVW)) {
#if defined(EPIC_DIAG)
				if (status & INTSTAT_OVW)
					device_printf(sc->dev, "RX buffer overflow\n");
				if (status & INTSTAT_RQE)
					device_printf(sc->dev, "RX FIFO overflow\n");
#endif
				/* Restart reception if the queue drained. */
				if ((CSR_READ_4(sc, COMMAND) & COMMAND_RXQUEUED) == 0)
					CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED);
				sc->sc_if.if_ierrors++;
			}
		}

		if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) {
			epic_tx_done(sc);
			/* Descriptors freed: push more queued packets out. */
			if (sc->sc_if.if_snd.ifq_head != NULL)
				epic_ifstart(&sc->sc_if);
		}

		/* Check for rare errors */
		if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
			      INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) {
			if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
				      INTSTAT_APE|INTSTAT_DPE)) {
				device_printf(sc->dev, "PCI fatal errors occured: %s%s%s%s\n",
				    (status&INTSTAT_PMA)?"PMA ":"",
				    (status&INTSTAT_PTA)?"PTA ":"",
				    (status&INTSTAT_APE)?"APE ":"",
				    (status&INTSTAT_DPE)?"DPE":""
				);

				/* Fatal PCI error: full chip reinit. */
				epic_stop(sc);
				epic_init(sc);

				break;
			}

			if (status & INTSTAT_RXE) {
#if defined(EPIC_DIAG)
				device_printf(sc->dev, "CRC/Alignment error\n");
#endif
				sc->sc_if.if_ierrors++;
			}

			if (status & INTSTAT_TXU) {
				epic_tx_underrun(sc);
				sc->sc_if.if_oerrors++;
			}
		}
	}

	/* If no packets are pending, then no timeouts */
	if (sc->pending_txs == 0) sc->sc_if.if_timer = 0;

	return;
}

/*
 * Handle the TX underrun error: increase the TX threshold
 * and restart the transmitter.
 */
static void
epic_tx_underrun(sc)
	epic_softc_t *sc;
{
	if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
		/* Threshold exhausted: give up on early transmit entirely. */
		sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
#if defined(EPIC_DIAG)
		device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
#endif
	} else {
		sc->tx_threshold += 0x40;
#if defined(EPIC_DIAG)
		device_printf(sc->dev, "Tx UNDERRUN: TX threshold increased to %d\n",
		    sc->tx_threshold);
#endif
	}

	/* We must set TXUGO to reset the stuck transmitter */
	CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);

	/* Update the TX threshold (TXCON may only be written while idle). */
	epic_stop_activity(sc);
	epic_set_tx_mode(sc);
	epic_start_activity(sc);

	return;
}

/*
 * Synopsis: This one is called if packets wasn't transmitted
 * during timeout. Try to deallocate transmitted packets, and
 * if success continue to work.
 */
static void
epic_ifwatchdog(ifp)
	struct ifnet *ifp;
{
	epic_softc_t *sc = ifp->if_softc;
	int x;

	x = splimp();

	device_printf(sc->dev, "device timeout %d packets\n", sc->pending_txs);

	/* Try to finish queued packets */
	epic_tx_done(sc);

	/* If not successful */
	if (sc->pending_txs > 0) {

		ifp->if_oerrors+=sc->pending_txs;

		/* Reinitialize board */
		device_printf(sc->dev, "reinitialization\n");
		epic_stop(sc);
		epic_init(sc);

	} else
		device_printf(sc->dev, "seems we can continue normaly\n");

	/* Start output */
	if (ifp->if_snd.ifq_head) epic_ifstart(ifp);

	splx(x);
}

/*
 * Despite the name of this function, it doesn't update statistics, it only
 * helps in autonegotiation process.  Ticks the MII layer once a second
 * via a self-rearming callout.
 */
static void
epic_stats_update(void *xsc)
{
	epic_softc_t *sc = xsc;
	struct mii_data * mii;
	int s;

	s = splimp();

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	/* Rearm: run again in one second. */
	callout_reset(&sc->tx_stat_timer, hz, epic_stats_update, sc);

	splx(s);
}

/*
 * Set media options.
 */
static int
epic_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	struct mii_softc *miisc;
	int cfg, media;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;
	media = ifm->ifm_cur->ifm_media;

	/* Do not do anything if interface is not up */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (0);

	/*
	 * Lookup current selected PHY
	 */
	if (IFM_INST(media) == sc->serinst) {
		/* Serial (10base2/BNC) pseudo-PHY has no mii_softc. */
		sc->phyid = EPIC_SERIAL;
		sc->physc = NULL;
	} else {
		/* If we're not selecting serial interface, select MII mode */
		sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		/* Default to unknown PHY */
		sc->phyid = EPIC_UNKN_PHY;

		/* Lookup selected PHY */
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list)) {
			if (IFM_INST(media) == miisc->mii_inst) {
				sc->physc = miisc;
				break;
			}
		}

		/* Identify selected PHY by its OUI/model registers */
		if (sc->physc) {
			int id1, id2, model, oui;

			id1 = PHY_READ(sc->physc, MII_PHYIDR1);
			id2 = PHY_READ(sc->physc, MII_PHYIDR2);

			oui = MII_OUI(id1, id2);
			model = MII_MODEL(id2);
			switch (oui) {
			case MII_OUI_QUALSEMI:
				if (model == MII_MODEL_QUALSEMI_QS6612)
					sc->phyid = EPIC_QS6612_PHY;
				break;
			case MII_OUI_xxALTIMA:
				if (model == MII_MODEL_xxALTIMA_AC101)
					sc->phyid = EPIC_AC101_PHY;
				break;
			case MII_OUI_xxLEVEL1:
				if (model == MII_MODEL_xxLEVEL1_LXT970)
					sc->phyid = EPIC_LXT970_PHY;
				break;
			}
		}
	}

	/*
	 * Do PHY specific card setup
	 */

	/* Call this, to isolate all not selected PHYs and
	 * set up the selected one
	 */
	mii_mediachg(mii);

	/* Do our own setup */
	switch (sc->phyid) {
	case EPIC_QS6612_PHY:
		/* nothing extra needed */
		break;
	case EPIC_AC101_PHY:
		/* We have to power up fiber transceivers */
		if (IFM_SUBTYPE(media) == IFM_100_FX)
			sc->miicfg |= MIICFG_694_ENABLE;
		else
			sc->miicfg &= ~MIICFG_694_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		break;
	case EPIC_LXT970_PHY:
		/* We have to power up fiber transceivers */
		cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX)
			cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
		else
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);

		break;
	case EPIC_SERIAL:
		/* Select serial PHY, (10base2/BNC usually) */
		sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		/* There is no driver to fill this */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		/* We need to call this manually as it isn't called
		 * in mii_mediachg()
		 */
		epic_miibus_statchg(sc->dev);

		break;
	default:
		device_printf(sc->dev, "ERROR! Unknown PHY selected\n");
		return (EINVAL);
	}

	return(0);
}

/*
 * Report current media status.
 */
static void
epic_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;

	/* Nothing should be selected if interface is down */
	if ((ifp->if_flags & IFF_UP) == 0) {
		ifmr->ifm_active = IFM_NONE;
		ifmr->ifm_status = 0;

		return;
	}

	/* Call underlying pollstat, if not serial PHY */
	if (sc->phyid != EPIC_SERIAL)
		mii_pollstat(mii);

	/* Simply copy media info */
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Callback routine, called on media change.
 */
static void
epic_miibus_statchg(dev)
	device_t dev;
{
	epic_softc_t *sc;
	struct mii_data *mii;
	int media;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->miibus);
	media = mii->mii_media_active;

	sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);

	/* If we are in full-duplex mode or loopback operation,
	 * we need to decouple receiver and transmitter.
	 */
	if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
		sc->txcon |= TXCON_FULL_DUPLEX;

	/* On some cards we need to manually set the full-duplex led */
	if (sc->cardid == SMC9432FTX ||
	    sc->cardid == SMC9432FTX_SC) {
		if (IFM_OPTIONS(media) & IFM_FDX)
			sc->miicfg |= MIICFG_694_ENABLE;
		else
			sc->miicfg &= ~MIICFG_694_ENABLE;

		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
	}

	/* Update baudrate */
	if (IFM_SUBTYPE(media) == IFM_100_TX ||
	    IFM_SUBTYPE(media) == IFM_100_FX)
		sc->sc_if.if_baudrate = 100000000;
	else
		sc->sc_if.if_baudrate = 10000000;

	/* TXCON can only be rewritten while the chip is idle. */
	epic_stop_activity(sc);
	epic_set_tx_mode(sc);
	epic_start_activity(sc);

	return;
}

/*
 * miibus mediainit callback: register the built-in serial media
 * interface (10Base2/BNC) if the chip reports one present.
 */
static void
epic_miibus_mediainit(dev)
	device_t dev;
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	int media;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;

	/* Add Serial Media Interface if present, this applies to
	 * the SMC9432BTX series
	 */
	if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
		/* Store its instance */
		sc->serinst = mii->mii_instance++;

		/* Add as 10base2/BNC media */
		media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
		ifmedia_add(ifm, media, 0, NULL);

		/* Report to user */
		device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n");
	}

	return;
}

/*
 * Reset chip, allocate rings, and update media.
 */
static int
epic_init(sc)
	epic_softc_t *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	int s,i;

	s = splimp();

	/* If interface is already running, then we need not do anything */
	if (ifp->if_flags & IFF_RUNNING) {
		splx(s);
		return 0;
	}

	/* Soft reset the chip (we have to power up card before) */
	CSR_WRITE_4(sc, GENCTL, 0);
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);

	/*
	 * Reset takes 15 pci ticks which depends on PCI bus speed.
	 * Assuming it >= 33000000 hz, we have wait at least 495e-6 sec.
	 */
	DELAY(500);

	/* Wake up */
	CSR_WRITE_4(sc, GENCTL, 0);

	/* Workaround for Application Note 7-15 */
	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

	/* Initialize rings */
	if (epic_init_rings(sc)) {
		device_printf(sc->dev, "failed to init rings\n");
		splx(s);
		return -1;
	}

	/* Give rings to EPIC (physical addresses of the descriptor lists) */
	CSR_WRITE_4(sc, PRCDAR, vtophys(sc->rx_desc));
	CSR_WRITE_4(sc, PTCDAR, vtophys(sc->tx_desc));

	/* Put node address to EPIC */
	CSR_WRITE_4(sc, LAN0, ((u_int16_t *)sc->sc_macaddr)[0]);
	CSR_WRITE_4(sc, LAN1, ((u_int16_t *)sc->sc_macaddr)[1]);
	CSR_WRITE_4(sc, LAN2, ((u_int16_t *)sc->sc_macaddr)[2]);

	/* Set tx mode, including transmit threshold */
	epic_set_tx_mode(sc);

	/* Compute and set RXCON. */
	epic_set_rx_mode(sc);

	/* Set multicast table */
	epic_set_mc_table(sc);

	/* Enable interrupts by setting the interrupt mask. */
	CSR_WRITE_4(sc, INTMASK,
	    INTSTAT_RCC | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
	    /* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
	    INTSTAT_FATAL);

	/* Acknowledge all pending interrupts */
	CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));

	/* Enable interrupts, set for PCI read multiple and etc */
	CSR_WRITE_4(sc, GENCTL,
	    GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
	    GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);

	/* Mark interface running ... */
	if (ifp->if_flags & IFF_UP) ifp->if_flags |= IFF_RUNNING;
	else ifp->if_flags &= ~IFF_RUNNING;

	/* ... and free */
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start Rx process */
	epic_start_activity(sc);

	/* Set appropriate media */
	epic_ifmedia_upd(ifp);

	callout_reset(&sc->tx_stat_timer, hz, epic_stats_update, sc);

	splx(s);

	return 0;
}

/*
 * Synopsis: calculate and set Rx mode. Chip must be in idle state to
 * access RXCON.
 */
static void
epic_set_rx_mode(sc)
	epic_softc_t *sc;
{
	u_int32_t flags = sc->sc_if.if_flags;
	u_int32_t rxcon = RXCON_DEFAULT;

#if defined(EPIC_EARLY_RX)
	rxcon |= RXCON_EARLY_RX;
#endif

	rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;

	CSR_WRITE_4(sc, RXCON, rxcon);

	return;
}

/*
 * Synopsis: Set transmit control register. Chip must be in idle state to
 * access TXCON.
 */
static void
epic_set_tx_mode(sc)
	epic_softc_t *sc;
{
	/* Early-transmit threshold only matters when early TX is on. */
	if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
		CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);

	CSR_WRITE_4(sc, TXCON, sc->txcon);
}

/*
 * Synopsis: Program multicast filter honoring IFF_ALLMULTI and IFF_PROMISC
 * flags.
 * (Note, that setting PROMISC bit in EPIC's RXCON will only touch
 * individual frames, multicast filter must be manually programmed)
 *
 * Note: EPIC must be in idle state.
 */
static void
epic_set_mc_table(sc)
	epic_softc_t *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	struct ifmultiaddr *ifma;
	u_int16_t filter[4];	/* 64-bit hash table, 4 x 16-bit MC registers */
	u_int8_t h;

	/* ALLMULTI/PROMISC: open all 64 hash bits so every multicast passes */
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		CSR_WRITE_4(sc, MC0, 0xFFFF);
		CSR_WRITE_4(sc, MC1, 0xFFFF);
		CSR_WRITE_4(sc, MC2, 0xFFFF);
		CSR_WRITE_4(sc, MC3, 0xFFFF);

		return;
	}

	filter[0] = 0;
	filter[1] = 0;
	filter[2] = 0;
	filter[3] = 0;

	/* Fold each subscribed link-layer multicast address into the hash */
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#else
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#endif
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* h is 6 bits: high 2 select the register, low 4 the bit */
		h = epic_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		filter[h >> 4] |= 1 << (h & 0xF);
	}

	CSR_WRITE_4(sc, MC0, filter[0]);
	CSR_WRITE_4(sc, MC1, filter[1]);
	CSR_WRITE_4(sc, MC2, filter[2]);
	CSR_WRITE_4(sc, MC3, filter[3]);

	return;
}

/*
 * Synopsis: calculate EPIC's hash of multicast address.
 *
 * Bitwise CRC-32 over the 6-byte MAC address, LSB of each byte first.
 * After "crc <<= 1" bit 0 is zero, so "(crc ^ 0x04c11db6) | carry" with
 * carry == 1 is exactly xor with the standard polynomial 0x04c11db7.
 * The top 6 bits of the CRC select one of 64 hash-table positions.
 */
static u_int8_t
epic_calchash(addr)
	caddr_t addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	return ((crc >> 26) & 0x3F);
}


/*
 * Synopsis: Start receive process and transmit one, if they need.
 */
static void
epic_start_activity(sc)
	epic_softc_t *sc;
{
	/* Start rx process; also (re)queue Tx if packets are pending */
	CSR_WRITE_4(sc, COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX |
	    (sc->pending_txs?COMMAND_TXQUEUED:0));
}

/*
 * Synopsis: Completely stop Rx and Tx processes. If TQE is set additional
 * packet needs to be queued to stop Tx DMA.
 */
static void
epic_stop_activity(sc)
	epic_softc_t *sc;
{
	int status, i;

	/* Stop Tx and Rx DMA */
	CSR_WRITE_4(sc, COMMAND,
	    COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);

	/* Wait Rx and Tx DMA to stop (why 1 ms ??? XXX) */
	for (i=0; i<0x1000; i++) {
		status = CSR_READ_4(sc, INTSTAT) & (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
		if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
			break;
		DELAY(1);
	}

	/* Catch all finished packets */
	epic_rx_done(sc);
	epic_tx_done(sc);

	status = CSR_READ_4(sc, INTSTAT);

	if ((status & INTSTAT_RXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");

	if ((status & INTSTAT_TXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");

	/*
	 * May need to queue one more packet if TQE, this is rare
	 * but existing case.  Return value deliberately ignored:
	 * nothing more can be done here if queueing fails.
	 */
	if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
		(void) epic_queue_last_packet(sc);

}

/*
 * The EPIC transmitter may stuck in TQE state. It will not go IDLE until
 * a packet from current descriptor will be copied to internal RAM. We
 * compose a dummy packet here and queue it for transmission.
 *
 * XXX the packet will then be actually sent over network...
1388 */ 1389 static int 1390 epic_queue_last_packet(sc) 1391 epic_softc_t *sc; 1392 { 1393 struct epic_tx_desc *desc; 1394 struct epic_frag_list *flist; 1395 struct epic_tx_buffer *buf; 1396 struct mbuf *m0; 1397 int i; 1398 1399 device_printf(sc->dev, "queue last packet\n"); 1400 1401 desc = sc->tx_desc + sc->cur_tx; 1402 flist = sc->tx_flist + sc->cur_tx; 1403 buf = sc->tx_buffer + sc->cur_tx; 1404 1405 if ((desc->status & 0x8000) || (buf->mbuf != NULL)) 1406 return (EBUSY); 1407 1408 MGETHDR(m0, MB_DONTWAIT, MT_DATA); 1409 if (NULL == m0) 1410 return (ENOBUFS); 1411 1412 /* Prepare mbuf */ 1413 m0->m_len = min(MHLEN, ETHER_MIN_LEN-ETHER_CRC_LEN); 1414 flist->frag[0].fraglen = m0->m_len; 1415 m0->m_pkthdr.len = m0->m_len; 1416 m0->m_pkthdr.rcvif = &sc->sc_if; 1417 bzero(mtod(m0,caddr_t), m0->m_len); 1418 1419 /* Fill fragments list */ 1420 flist->frag[0].fraglen = m0->m_len; 1421 flist->frag[0].fragaddr = vtophys(mtod(m0, caddr_t)); 1422 flist->numfrags = 1; 1423 1424 /* Fill in descriptor */ 1425 buf->mbuf = m0; 1426 sc->pending_txs++; 1427 sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK; 1428 desc->control = 0x01; 1429 desc->txlength = max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN); 1430 desc->status = 0x8000; 1431 1432 /* Launch transmition */ 1433 CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED); 1434 1435 /* Wait Tx DMA to stop (for how long??? XXX) */ 1436 for (i=0; i<1000; i++) { 1437 if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) 1438 break; 1439 DELAY(1); 1440 } 1441 1442 if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0) 1443 device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n"); 1444 else 1445 epic_tx_done(sc); 1446 1447 return 0; 1448 } 1449 1450 /* 1451 * Synopsis: Shut down board and deallocates rings. 
1452 */ 1453 static void 1454 epic_stop(sc) 1455 epic_softc_t *sc; 1456 { 1457 int s; 1458 1459 s = splimp(); 1460 1461 sc->sc_if.if_timer = 0; 1462 1463 callout_stop(&sc->tx_stat_timer); 1464 1465 /* Disable interrupts */ 1466 CSR_WRITE_4(sc, INTMASK, 0); 1467 CSR_WRITE_4(sc, GENCTL, 0); 1468 1469 /* Try to stop Rx and TX processes */ 1470 epic_stop_activity(sc); 1471 1472 /* Reset chip */ 1473 CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET); 1474 DELAY(1000); 1475 1476 /* Make chip go to bed */ 1477 CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN); 1478 1479 /* Free memory allocated for rings */ 1480 epic_free_rings(sc); 1481 1482 /* Mark as stoped */ 1483 sc->sc_if.if_flags &= ~IFF_RUNNING; 1484 1485 splx(s); 1486 return; 1487 } 1488 1489 /* 1490 * Synopsis: This function should free all memory allocated for rings. 1491 */ 1492 static void 1493 epic_free_rings(sc) 1494 epic_softc_t *sc; 1495 { 1496 int i; 1497 1498 for (i=0; i<RX_RING_SIZE; i++) { 1499 struct epic_rx_buffer *buf = sc->rx_buffer + i; 1500 struct epic_rx_desc *desc = sc->rx_desc + i; 1501 1502 desc->status = 0; 1503 desc->buflength = 0; 1504 desc->bufaddr = 0; 1505 1506 if (buf->mbuf) m_freem(buf->mbuf); 1507 buf->mbuf = NULL; 1508 } 1509 1510 for (i=0; i<TX_RING_SIZE; i++) { 1511 struct epic_tx_buffer *buf = sc->tx_buffer + i; 1512 struct epic_tx_desc *desc = sc->tx_desc + i; 1513 1514 desc->status = 0; 1515 desc->buflength = 0; 1516 desc->bufaddr = 0; 1517 1518 if (buf->mbuf) m_freem(buf->mbuf); 1519 buf->mbuf = NULL; 1520 } 1521 } 1522 1523 /* 1524 * Synopsis: Allocates mbufs for Rx ring and point Rx descs to them. 1525 * Point Tx descs to fragment lists. Check that all descs and fraglists 1526 * are bounded and aligned properly. 
 */
static int
epic_init_rings(sc)
	epic_softc_t *sc;
{
	int i;

	/* Reset ring indices and in-flight Tx counter */
	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;	/* Owned by driver */
		/* Link descriptors into a physical-address ring */
		desc->next = vtophys(sc->rx_desc + ((i+1) & RX_RING_MASK));

		/* Descriptor must be 4-byte aligned and must not cross a
		 * page boundary (vtophys is valid per-page only) */
		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return EFAULT;
		}

		EPIC_MGETCLUSTER(buf->mbuf);
		if (NULL == buf->mbuf) {
			epic_free_rings(sc);
			return ENOBUFS;
		}
		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));

		desc->buflength = MCLBYTES;	/* Max RX buffer length */
		desc->status = 0x8000;		/* Set owner bit to NIC */
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct epic_tx_buffer *buf = sc->tx_buffer + i;
		struct epic_tx_desc *desc = sc->tx_desc + i;

		desc->status = 0;
		desc->next = vtophys(sc->tx_desc + ((i+1) & TX_RING_MASK));

		/* Same alignment/page-crossing constraints as for Rx */
		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return EFAULT;
		}

		buf->mbuf = NULL;
		/* Tx descriptors point at the per-slot fragment list */
		desc->bufaddr = vtophys(sc->tx_flist + i);

		if ((desc->bufaddr & 3) ||
		    ((desc->bufaddr & PAGE_MASK) + sizeof(struct epic_frag_list)) > PAGE_SIZE) {
			epic_free_rings(sc);
			return EFAULT;
		}
	}

	return 0;
}

/*
 * EEPROM operation functions
 */

/*
 * Write a value to the EECTL register and poll (up to 0xFF reads) until
 * bit 0x20 clears — presumably a busy/ready indication; confirm against
 * the SMC83c17x datasheet.
 */
static void
epic_write_eepromreg(sc, val)
	epic_softc_t *sc;
	u_int8_t val;
{
	u_int16_t i;

	CSR_WRITE_1(sc, EECTL, val);

	for (i=0; i<0xFF; i++)
		if ((CSR_READ_1(sc, EECTL) & 0x20) == 0) break;

	return;
}

/* Read back the raw EECTL register */
static u_int8_t
epic_read_eepromreg(sc)
	epic_softc_t *sc;
{
	return CSR_READ_1(sc, EECTL);
}

/*
 * Bit-bang one EEPROM clock cycle: write val with the clock bit (0x4)
 * low, high, low again, then return the resulting EECTL value so the
 * caller can sample the data-out bit.
 */
static u_int8_t
epic_eeprom_clock(sc, val)
	epic_softc_t *sc;
	u_int8_t val;
{
	epic_write_eepromreg(sc, val);
	epic_write_eepromreg(sc, (val | 0x4));
	epic_write_eepromreg(sc, val);

	return epic_read_eepromreg(sc);
}

/*
 * Shift a 16-bit word out to the EEPROM, MSB first; 0x0B/0x03 encode
 * data-high/data-low with the EEPROM enabled.
 */
static void
epic_output_eepromw(sc, val)
	epic_softc_t *sc;
	u_int16_t val;
{
	int i;

	for (i = 0xF; i >= 0; i--) {
		if (val & (1 << i))
			epic_eeprom_clock(sc, 0x0B);
		else
			epic_eeprom_clock(sc, 0x03);
	}
}

/*
 * Shift a 16-bit word in from the EEPROM, MSB first, sampling the
 * data-out bit (0x10) after each clock cycle.
 */
static u_int16_t
epic_input_eepromw(sc)
	epic_softc_t *sc;
{
	u_int16_t retval = 0;
	int i;

	for (i = 0xF; i >= 0; i--) {
		if (epic_eeprom_clock(sc, 0x3) & 0x10)
			retval |= (1 << i);
	}

	return retval;
}

/*
 * Read one 16-bit word from the serial EEPROM at address loc.
 * Bit 0x40 of EECTL distinguishes the EEPROM size and thus the read
 * command format (6-bit vs 8-bit address field); writing 1 at the end
 * deselects the EEPROM.
 */
static int
epic_read_eeprom(sc, loc)
	epic_softc_t *sc;
	u_int16_t loc;
{
	u_int16_t dataval;
	u_int16_t read_cmd;

	epic_write_eepromreg(sc, 3);

	if (epic_read_eepromreg(sc) & 0x40)
		read_cmd = (loc & 0x3F) | 0x180;
	else
		read_cmd = (loc & 0xFF) | 0x600;

	epic_output_eepromw(sc, read_cmd);

	dataval = epic_input_eepromw(sc);

	epic_write_eepromreg(sc, 1);

	return dataval;
}

/*
 * Here goes MII read/write routines
 */

/*
 * Read a PHY register: start the MII read (command bit 0x01) and poll
 * up to 0x100 us for completion, then return MIIDATA.  NOTE(review):
 * the result is returned even if the poll loop times out.
 */
static int
epic_read_phy_reg(sc, phy, reg)
	epic_softc_t *sc;
	int phy, reg;
{
	int i;

	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));

	for (i = 0; i < 0x100; i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0) break;
		DELAY(1);
	}

	return (CSR_READ_4(sc, MIIDATA));
}

/*
 * Write a PHY register: load MIIDATA, start the MII write (command bit
 * 0x02), and poll up to 0x100 us for completion.
 */
static void
epic_write_phy_reg(sc, phy, reg, val)
	epic_softc_t *sc;
	int phy, reg, val;
{
	int i;

	CSR_WRITE_4(sc, MIIDATA, val);
	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));

	for(i=0;i<0x100;i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0) break;
		DELAY(1);
	}

	return;
}

/* miibus glue: forward MII register reads to the chip's MII interface */
static int
epic_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	return (PHY_READ_2(sc, phy, reg));
}

/* miibus glue: forward MII register writes to the chip's MII interface */
static int
epic_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	PHY_WRITE_2(sc, phy, reg, data);

	return (0);
}