1 /*- 2 * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: src/sys/dev/tx/if_tx.c,v 1.61.2.1 2002/10/29 01:43:49 semenu Exp $ 27 * $DragonFly: src/sys/dev/netif/tx/if_tx.c,v 1.12 2004/07/02 17:42:20 joerg Exp $ 28 */ 29 30 /* 31 * EtherPower II 10/100 Fast Ethernet (SMC 9432 serie) 32 * 33 * These cards are based on SMC83c17x (EPIC) chip and one of the various 34 * PHYs (QS6612, AC101 and LXT970 were seen). The media support depends on 35 * card model. All cards support 10baseT/UTP and 100baseTX half- and full- 36 * duplex (SMB9432TX). SMC9432BTX also supports 10baseT/BNC. SMC9432FTX also 37 * supports fibre optics. 
38 * 39 * Thanks are going to Steve Bauer and Jason Wright. 40 */ 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/sockio.h> 45 #include <sys/mbuf.h> 46 #include <sys/malloc.h> 47 #include <sys/kernel.h> 48 #include <sys/socket.h> 49 #include <sys/queue.h> 50 51 #include <net/if.h> 52 #include <net/if_arp.h> 53 #include <net/ethernet.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 57 #include <net/bpf.h> 58 59 #include <net/vlan/if_vlan_var.h> 60 61 #include <vm/vm.h> /* for vtophys */ 62 #include <vm/pmap.h> /* for vtophys */ 63 #include <machine/bus_memio.h> 64 #include <machine/bus_pio.h> 65 #include <machine/bus.h> 66 #include <machine/resource.h> 67 #include <machine/clock.h> /* for DELAY */ 68 #include <sys/bus.h> 69 #include <sys/rman.h> 70 71 #include <bus/pci/pcireg.h> 72 #include <bus/pci/pcivar.h> 73 74 #include "../mii_layer/mii.h" 75 #include "../mii_layer/miivar.h" 76 #include "../mii_layer/miidevs.h" 77 #include "../mii_layer/lxtphyreg.h" 78 79 #include "miibus_if.h" 80 81 #include "if_txreg.h" 82 #include "if_txvar.h" 83 84 static int epic_ifioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 85 static void epic_intr(void *); 86 static void epic_tx_underrun(epic_softc_t *); 87 static int epic_common_attach(epic_softc_t *); 88 static void epic_ifstart(struct ifnet *); 89 static void epic_ifwatchdog(struct ifnet *); 90 static void epic_stats_update(epic_softc_t *); 91 static int epic_init(epic_softc_t *); 92 static void epic_stop(epic_softc_t *); 93 static void epic_rx_done(epic_softc_t *); 94 static void epic_tx_done(epic_softc_t *); 95 static int epic_init_rings(epic_softc_t *); 96 static void epic_free_rings(epic_softc_t *); 97 static void epic_stop_activity(epic_softc_t *); 98 static int epic_queue_last_packet(epic_softc_t *); 99 static void epic_start_activity(epic_softc_t *); 100 static void epic_set_rx_mode(epic_softc_t *); 101 static void epic_set_tx_mode(epic_softc_t *); 102 static void 
epic_set_mc_table(epic_softc_t *);
static u_int8_t epic_calchash(caddr_t);
static int epic_read_eeprom(epic_softc_t *,u_int16_t);
static void epic_output_eepromw(epic_softc_t *, u_int16_t);
static u_int16_t epic_input_eepromw(epic_softc_t *);
static u_int8_t epic_eeprom_clock(epic_softc_t *,u_int8_t);
static void epic_write_eepromreg(epic_softc_t *,u_int8_t);
static u_int8_t epic_read_eepromreg(epic_softc_t *);

static int epic_read_phy_reg(epic_softc_t *, int, int);
static void epic_write_phy_reg(epic_softc_t *, int, int, int);

static int epic_miibus_readreg(device_t, int, int);
static int epic_miibus_writereg(device_t, int, int, int);
static void epic_miibus_statchg(device_t);
static void epic_miibus_mediainit(device_t);

static int epic_ifmedia_upd(struct ifnet *);
static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int epic_probe(device_t);
static int epic_attach(device_t);
static void epic_shutdown(device_t);
static int epic_detach(device_t);
static struct epic_type *epic_devtype(device_t);

/* Newbus + MII bus method dispatch table for this driver. */
static device_method_t epic_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		epic_probe),
	DEVMETHOD(device_attach,	epic_attach),
	DEVMETHOD(device_detach,	epic_detach),
	DEVMETHOD(device_shutdown,	epic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	epic_miibus_readreg),
	DEVMETHOD(miibus_writereg,	epic_miibus_writereg),
	DEVMETHOD(miibus_statchg,	epic_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	epic_miibus_mediainit),

	{ 0, 0 }
};

static driver_t epic_driver = {
	"tx",			/* interface name prefix, e.g. tx0 */
	epic_methods,
	sizeof(epic_softc_t)
};

static devclass_t epic_devclass;

DECLARE_DUMMY_MODULE(if_tx);
MODULE_DEPEND(if_tx, miibus, 1, 1, 1);
DRIVER_MODULE(if_tx, pci, epic_driver, epic_devclass, 0, 0);
DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);
/* PCI ID match table; the list is terminated by a NULL name. */
static struct epic_type epic_devs[] = {
	{ SMC_VENDORID, SMC_DEVICEID_83C170,
		"SMC EtherPower II 10/100" },
	{ 0, 0, NULL }
};

/*
 * Probe method: succeed (return 0) and set the device description
 * when the PCI IDs match a table entry; ENXIO otherwise.
 */
static int
epic_probe(dev)
	device_t dev;
{
	struct epic_type *t;

	t = epic_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->name);
		return(0);
	}

	return(ENXIO);
}

/*
 * Look up this device's PCI vendor/device ID pair in epic_devs.
 * Returns the matching entry, or NULL if the card is not supported.
 */
static struct epic_type *
epic_devtype(dev)
	device_t dev;
{
	struct epic_type *t;

	t = epic_devs;

	while(t->name != NULL) {
		if ((pci_get_vendor(dev) == t->ven_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			return(t);
		}
		t++;
	}
	return (NULL);
}

/* Register access window: I/O ports or memory space, chosen at build time. */
#if defined(EPIC_USEIOSPACE)
#define EPIC_RES	SYS_RES_IOPORT
#define EPIC_RID	PCIR_BASEIO
#else
#define EPIC_RES	SYS_RES_MEMORY
#define EPIC_RID	PCIR_BASEMEM
#endif

/*
 * Attach routine: map registers, allocate softc, rings and descriptors.
 * Reset to known state.
 *
 * Note: the success path falls through into the "fail" label with
 * error == 0, so the label really means "common exit".
 */
static int
epic_attach(dev)
	device_t dev;
{
	struct ifnet *ifp;
	epic_softc_t *sc;
	u_int32_t command;
	int unit, error;
	int i, s, rid, tmp;

	s = splimp ();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/* Preinitialize softc structure */
	bzero(sc, sizeof(epic_softc_t));
	sc->unit = unit;
	sc->dev = dev;

	/* Fill ifnet structure */
	ifp = &sc->sc_if;
	if_initname(ifp, "tx", unit);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST;
	ifp->if_ioctl = epic_ifioctl;
	ifp->if_output = ether_output;
	ifp->if_start = epic_ifstart;
	ifp->if_watchdog = epic_ifwatchdog;
	ifp->if_init = (if_init_f_t*)epic_init;
	ifp->if_timer = 0;
	ifp->if_baudrate = 10000000;	/* updated later by epic_miibus_statchg() */
	ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1;

	/* Enable ports, memory and busmastering */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	/* Re-read to verify the enable bits actually stuck. */
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#if defined(EPIC_USEIOSPACE)
	if ((command & PCIM_CMD_PORTEN) == 0) {
		device_printf(dev, "failed to enable I/O mapping!\n");
		error = ENXIO;
		goto fail;
	}
#else
	if ((command & PCIM_CMD_MEMEN) == 0) {
		device_printf(dev, "failed to enable memory mapping!\n");
		error = ENXIO;
		goto fail;
	}
#endif

	/* Map the chip's register window. */
	rid = EPIC_RID;
	sc->res = bus_alloc_resource(dev, EPIC_RES, &rid, 0, ~0, 1,
	    RF_ACTIVE);

	if (sc->res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->sc_st = rman_get_bustag(sc->res);
	sc->sc_sh = rman_get_bushandle(sc->res);

	/* Allocate interrupt */
	rid = 0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
	    epic_intr, sc, &sc->sc_ih);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	/* Do OS independent part, including chip wakeup and reset */
	error = epic_common_attach(sc);
	if (error) {
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
		error = ENXIO;
		goto fail;
	}

	/* Do ifmedia setup */
	if (mii_phy_probe(dev, &sc->miibus,
	    epic_ifmedia_upd, epic_ifmedia_sts)) {
		device_printf(dev, "ERROR! MII without any PHY!?\n");
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
		error = ENXIO;
		goto fail;
	}

	/* board type and ... */
	/* Print the ASCII board name stored in EEPROM words 0x2c..0x31;
	 * a space character terminates the name early. */
	printf(" type ");
	for(i=0x2c;i<0x32;i++) {
		tmp = epic_read_eeprom(sc, i);
		if (' ' == (u_int8_t)tmp) break;
		printf("%c", (u_int8_t)tmp);
		tmp >>= 8;
		if (' ' == (u_int8_t)tmp) break;
		printf("%c", (u_int8_t)tmp);
	}
	printf("\n");

	/* Attach to OS's managers */
	ether_ifattach(ifp, sc->sc_macaddr);
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	callout_handle_init(&sc->stat_ch);

fail:
	splx(s);

	return(error);
}

/*
 * Detach driver and free resources
 */
static int
epic_detach(dev)
	device_t dev;
{
	struct ifnet *ifp;
	epic_softc_t *sc;
	int s;

	s = splimp();

	sc = device_get_softc(dev);
	/* NOTE(review): arpcom.ac_if is presumably the same ifnet as the
	 * sc_if used in epic_attach() -- confirm against if_txvar.h. */
	ifp = &sc->arpcom.ac_if;

	ether_ifdetach(ifp);

	epic_stop(sc);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->miibus);

	bus_teardown_intr(dev, sc->irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);

	/* Free the arrays allocated in epic_common_attach(). */
	free(sc->tx_flist, M_DEVBUF);
	free(sc->tx_desc, M_DEVBUF);
	free(sc->rx_desc, M_DEVBUF);

	splx(s);

	return(0);
}

#undef EPIC_RES
#undef EPIC_RID

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
epic_shutdown(dev)
	device_t dev;
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	epic_stop(sc);

	return;
}

/*
 * This is if_ioctl handler.
 */
static int
epic_ifioctl(ifp, command, data, cr)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
	struct ucred *cr;	/* unused; part of the if_ioctl signature */
{
	epic_softc_t *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *) data;
	int x, error = 0;

	x = splimp();

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		/* XXX Though the datasheet doesn't imply any
		 * limitations on RX and TX sizes beside max 64Kb
		 * DMA transfer, seems we can't send more then 1600
		 * data bytes per ethernet packet. (Transmitter hangs
		 * up if more data is sent)
		 */
		if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Restart so buffers are re-sized for the new MTU. */
			epic_stop(sc);
			epic_init(sc);
		} else
			error = EINVAL;
		break;

	case SIOCSIFFLAGS:
		/*
		 * If the interface is marked up and stopped, then start it.
		 * If it is marked down and running, then stop it.
		 */
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				epic_init(sc);
				break;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				epic_stop(sc);
				break;
			}
		}

		/* Handle IFF_PROMISC and IFF_ALLMULTI flags */
		/* (the chip must be idle while RXCON/MC registers change) */
		epic_stop_activity(sc);
		epic_set_mc_table(sc);
		epic_set_rx_mode(sc);
		epic_start_activity(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		epic_set_mc_table(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Delegate media ioctls to the MII layer. */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = EINVAL;
	}
	splx(x);

	return error;
}

/*
 * OS-independent part of attach process. allocate memory for descriptors
 * and frag lists, wake up chip, read MAC address and PHY identifier.
 * Return -1 on failure.
 */
static int
epic_common_attach(sc)
	epic_softc_t *sc;
{
	int i;

	/* With M_WAITOK these allocations sleep rather than fail. */
	sc->tx_flist = malloc(sizeof(struct epic_frag_list)*TX_RING_SIZE,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->tx_desc = malloc(sizeof(struct epic_tx_desc)*TX_RING_SIZE,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->rx_desc = malloc(sizeof(struct epic_rx_desc)*RX_RING_SIZE,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/* Bring the chip out of low-power mode. */
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
	DELAY(500);

	/* Workaround for Application Note 7-15 */
	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

	/* Read mac address from EEPROM (stored as three 16-bit words) */
	for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
		((u_int16_t *)sc->sc_macaddr)[i] = epic_read_eeprom(sc,i);

	/* Set Non-Volatile Control Register from EEPROM */
	CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);

	/* Set defaults */
	sc->tx_threshold = TRANSMIT_THRESHOLD;
	sc->txcon = TXCON_DEFAULT;
	sc->miicfg = MIICFG_SMI_ENABLE;
	sc->phyid = EPIC_UNKN_PHY;	/* identified later in epic_ifmedia_upd() */
	sc->serinst = -1;		/* no serial (10base2/BNC) media yet */

	/* Fetch card id */
	sc->cardvend = pci_read_config(sc->dev, PCIR_SUBVEND_0, 2);
	sc->cardid = pci_read_config(sc->dev, PCIR_SUBDEV_0, 2);

	if (sc->cardvend != SMC_VENDORID)
		device_printf(sc->dev, "unknown card vendor %04xh\n", sc->cardvend);

	return 0;
}

/*
 * This is if_start handler. It takes mbufs from if_snd queue
 * and queue them for transmit, one by one, until TX ring become full
 * or queue become empty.
 */
static void
epic_ifstart(ifp)
	struct ifnet * ifp;
{
	epic_softc_t *sc = ifp->if_softc;
	struct epic_tx_buffer *buf;
	struct epic_tx_desc *desc;
	struct epic_frag_list *flist;
	struct mbuf *m0;
	struct mbuf *m;
	int i;

	while (sc->pending_txs < TX_RING_SIZE) {
		buf = sc->tx_buffer + sc->cur_tx;
		desc = sc->tx_desc + sc->cur_tx;
		flist = sc->tx_flist + sc->cur_tx;

		/* Get next packet to send */
		IF_DEQUEUE(&ifp->if_snd, m0);

		/* If nothing to send, return */
		if (NULL == m0) return;

		/* Fill fragments list */
		/* (physical addresses are handed to the chip for DMA) */
		for (m = m0, i = 0;
		    (NULL != m) && (i < EPIC_MAX_FRAGS);
		    m = m->m_next, i++) {
			flist->frag[i].fraglen = m->m_len;
			flist->frag[i].fragaddr = vtophys(mtod(m, caddr_t));
		}
		flist->numfrags = i;

		/* If packet was more than EPIC_MAX_FRAGS parts, */
		/* recopy packet to new allocated mbuf cluster */
		if (NULL != m) {
			EPIC_MGETCLUSTER(m);
			if (NULL == m) {
				/* No cluster available: drop the packet. */
				m_freem(m0);
				ifp->if_oerrors++;
				continue;
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			flist->frag[0].fraglen =
			    m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m->m_pkthdr.rcvif = ifp;

			flist->numfrags = 1;
			flist->frag[0].fragaddr = vtophys(mtod(m, caddr_t));
			m_freem(m0);
			m0 = m;
		}

		buf->mbuf = m0;
		sc->pending_txs++;
		sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
		desc->control = 0x01;
		/* Pad short frames to the ethernet minimum (sans CRC). */
		desc->txlength =
		    max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
		/* Setting 0x8000 hands descriptor ownership to the chip. */
		desc->status = 0x8000;
		CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);

		/* Set watchdog timer */
		ifp->if_timer = 8;

		if (ifp->if_bpf)
			bpf_mtap(ifp, m0);
	}

	/* Ring is full: tell the stack to stop feeding us packets. */
	ifp->if_flags |= IFF_OACTIVE;

	return;

}

/*
 * Synopsis: Finish all received frames.
 */
static void
epic_rx_done(sc)
	epic_softc_t *sc;
{
	u_int16_t len;
	struct epic_rx_buffer *buf;
	struct epic_rx_desc *desc;
	struct mbuf *m;
	struct ether_header *eh;

	/* Status bit 0x8000 set means the chip still owns the descriptor. */
	while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
		buf = sc->rx_buffer + sc->cur_rx;
		desc = sc->rx_desc + sc->cur_rx;

		/* Switch to next descriptor */
		sc->cur_rx = (sc->cur_rx+1) & RX_RING_MASK;

		/*
		 * Check for RX errors. This should only happen if
		 * SAVE_ERRORED_PACKETS is set. RX errors generate
		 * RXE interrupt usually.
		 */
		if ((desc->status & 1) == 0) {
			sc->sc_if.if_ierrors++;
			desc->status = 0x8000;	/* return descriptor to chip */
			continue;
		}

		/* Save packet length and mbuf contained packet */
		len = desc->rxlength - ETHER_CRC_LEN;
		m = buf->mbuf;

		/* Try to get mbuf cluster */
		EPIC_MGETCLUSTER(buf->mbuf);
		if (NULL == buf->mbuf) {
			/* No replacement cluster: recycle the old mbuf
			 * and count the frame as dropped. */
			buf->mbuf = m;
			desc->status = 0x8000;
			sc->sc_if.if_ierrors++;
			continue;
		}

		/* Point to new mbuf, and give descriptor to chip */
		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
		desc->status = 0x8000;

		/* First mbuf in packet holds the ethernet and packet headers */
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = &(sc->sc_if);
		m->m_pkthdr.len = m->m_len = len;

		/* Second mbuf holds packet ifself */
		/* (strip the ethernet header; the old-style ether_input
		 * receives it separately via eh) */
		m->m_pkthdr.len = m->m_len = len - sizeof(struct ether_header);
		m->m_data += sizeof(struct ether_header);

		/* Give mbuf to OS */
		ether_input(&sc->sc_if, eh, m);

		/* Successfuly received frame */
		sc->sc_if.if_ipackets++;
	}

	return;
}

/*
 * Synopsis: Do last phase of transmission. I.e. if desc is
 * transmitted, decrease pending_txs counter, free mbuf contained
 * packet, switch to next descriptor and repeat until no packets
 * are pending or descriptor is not transmitted yet.
 */
static void
epic_tx_done(sc)
	epic_softc_t *sc;
{
	struct epic_tx_buffer *buf;
	struct epic_tx_desc *desc;
	u_int16_t status;

	while (sc->pending_txs > 0) {
		buf = sc->tx_buffer + sc->dirty_tx;
		desc = sc->tx_desc + sc->dirty_tx;
		status = desc->status;

		/* If packet is not transmitted, thou followed */
		/* packets are not transmitted too */
		if (status & 0x8000) break;	/* chip still owns descriptor */

		/* Packet is transmitted. Switch to next and */
		/* free mbuf */
		sc->pending_txs--;
		sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
		m_freem(buf->mbuf);
		buf->mbuf = NULL;

		/* Check for errors and collisions */
		if (status & 0x0001) sc->sc_if.if_opackets++;
		else sc->sc_if.if_oerrors++;
		/* Bits 8..12 of the status hold this frame's collision count. */
		sc->sc_if.if_collisions += (status >> 8) & 0x1F;
#if defined(EPIC_DIAG)
		if ((status & 0x1001) == 0x1001)
			device_printf(sc->dev, "Tx ERROR: excessive coll. number\n");
#endif
	}

	if (sc->pending_txs < TX_RING_SIZE)
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
}

/*
 * Interrupt function
 */
static void
epic_intr(arg)
	void *arg;
{
	epic_softc_t * sc = (epic_softc_t *) arg;
	int status, i = 4;

	/* Loop at most 4 times to bound time spent at interrupt level. */
	while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
		/* Acknowledge the interrupts we are about to service. */
		CSR_WRITE_4(sc, INTSTAT, status);

		if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) {
			epic_rx_done(sc);
			if (status & (INTSTAT_RQE|INTSTAT_OVW)) {
#if defined(EPIC_DIAG)
				if (status & INTSTAT_OVW)
					device_printf(sc->dev, "RX buffer overflow\n");
				if (status & INTSTAT_RQE)
					device_printf(sc->dev, "RX FIFO overflow\n");
#endif
				/* Restart the receiver if its queue stalled. */
				if ((CSR_READ_4(sc, COMMAND) & COMMAND_RXQUEUED) == 0)
					CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED);
				sc->sc_if.if_ierrors++;
			}
		}

		if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) {
			epic_tx_done(sc);
			/* Ring space was freed: push out queued packets. */
			if (sc->sc_if.if_snd.ifq_head != NULL)
				epic_ifstart(&sc->sc_if);
		}

		/* Check for rare errors */
		if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
		    INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) {
			if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
			    INTSTAT_APE|INTSTAT_DPE)) {
				device_printf(sc->dev, "PCI fatal errors occured: %s%s%s%s\n",
				    (status&INTSTAT_PMA)?"PMA ":"",
				    (status&INTSTAT_PTA)?"PTA ":"",
				    (status&INTSTAT_APE)?"APE ":"",
				    (status&INTSTAT_DPE)?"DPE":""
				);

				/* Fatal PCI error: full reinitialization. */
				epic_stop(sc);
				epic_init(sc);

				break;
			}

			if (status & INTSTAT_RXE) {
#if defined(EPIC_DIAG)
				device_printf(sc->dev, "CRC/Alignment error\n");
#endif
				sc->sc_if.if_ierrors++;
			}

			if (status & INTSTAT_TXU) {
				epic_tx_underrun(sc);
				sc->sc_if.if_oerrors++;
			}
		}
	}

	/* If no packets are pending, then no timeouts */
	if (sc->pending_txs == 0) sc->sc_if.if_timer = 0;

	return;
}

/*
 * Handle the TX underrun error: increase the TX threshold
 * and restart the transmitter.
 */
static void
epic_tx_underrun(sc)
	epic_softc_t *sc;
{
	if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
		/* Threshold maxed out: fall back to store-and-forward. */
		sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
#if defined(EPIC_DIAG)
		device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
#endif
	} else {
		sc->tx_threshold += 0x40;
#if defined(EPIC_DIAG)
		device_printf(sc->dev, "Tx UNDERRUN: TX threshold increased to %d\n",
		    sc->tx_threshold);
#endif
	}

	/* We must set TXUGO to reset the stuck transmitter */
	CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);

	/* Update the TX threshold (TXCON is only writable while idle) */
	epic_stop_activity(sc);
	epic_set_tx_mode(sc);
	epic_start_activity(sc);

	return;
}

/*
 * Synopsis: This one is called if packets wasn't transmitted
 * during timeout. Try to deallocate transmitted packets, and
 * if success continue to work.
830 */ 831 static void 832 epic_ifwatchdog(ifp) 833 struct ifnet *ifp; 834 { 835 epic_softc_t *sc = ifp->if_softc; 836 int x; 837 838 x = splimp(); 839 840 device_printf(sc->dev, "device timeout %d packets\n", sc->pending_txs); 841 842 /* Try to finish queued packets */ 843 epic_tx_done(sc); 844 845 /* If not successful */ 846 if (sc->pending_txs > 0) { 847 848 ifp->if_oerrors+=sc->pending_txs; 849 850 /* Reinitialize board */ 851 device_printf(sc->dev, "reinitialization\n"); 852 epic_stop(sc); 853 epic_init(sc); 854 855 } else 856 device_printf(sc->dev, "seems we can continue normaly\n"); 857 858 /* Start output */ 859 if (ifp->if_snd.ifq_head) epic_ifstart(ifp); 860 861 splx(x); 862 } 863 864 /* 865 * Despite the name of this function, it doesn't update statistics, it only 866 * helps in autonegotiation process. 867 */ 868 static void 869 epic_stats_update(epic_softc_t * sc) 870 { 871 struct mii_data * mii; 872 int s; 873 874 s = splimp(); 875 876 mii = device_get_softc(sc->miibus); 877 mii_tick(mii); 878 879 sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz); 880 881 splx(s); 882 } 883 884 /* 885 * Set media options. 
 */
static int
epic_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	struct mii_softc *miisc;
	int cfg, media;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;
	media = ifm->ifm_cur->ifm_media;

	/* Do not do anything if interface is not up */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (0);

	/*
	 * Lookup current selected PHY
	 */
	if (IFM_INST(media) == sc->serinst) {
		/* The serial (10base2/BNC) pseudo-PHY was selected. */
		sc->phyid = EPIC_SERIAL;
		sc->physc = NULL;
	} else {
		/* If we're not selecting serial interface, select MII mode */
		sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		/* Default to unknown PHY */
		sc->phyid = EPIC_UNKN_PHY;

		/* Lookup selected PHY */
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list)) {
			if (IFM_INST(media) == miisc->mii_inst) {
				sc->physc = miisc;
				break;
			}
		}

		/* Identify selected PHY */
		/* (match the OUI/model from the PHY ID registers against
		 * the PHYs this driver has quirks for) */
		if (sc->physc) {
			int id1, id2, model, oui;

			id1 = PHY_READ(sc->physc, MII_PHYIDR1);
			id2 = PHY_READ(sc->physc, MII_PHYIDR2);

			oui = MII_OUI(id1, id2);
			model = MII_MODEL(id2);
			switch (oui) {
			case MII_OUI_QUALSEMI:
				if (model == MII_MODEL_QUALSEMI_QS6612)
					sc->phyid = EPIC_QS6612_PHY;
				break;
			case MII_OUI_xxALTIMA:
				if (model == MII_MODEL_xxALTIMA_AC101)
					sc->phyid = EPIC_AC101_PHY;
				break;
			case MII_OUI_xxLEVEL1:
				if (model == MII_MODEL_xxLEVEL1_LXT970)
					sc->phyid = EPIC_LXT970_PHY;
				break;
			}
		}
	}

	/*
	 * Do PHY specific card setup
	 */

	/* Call this, to isolate all not selected PHYs and
	 * set up selected
	 */
	mii_mediachg(mii);

	/* Do our own setup */
	switch (sc->phyid) {
	case EPIC_QS6612_PHY:
		break;
	case EPIC_AC101_PHY:
		/* We have to powerup fiber tranceivers */
		if (IFM_SUBTYPE(media) == IFM_100_FX)
			sc->miicfg |= MIICFG_694_ENABLE;
		else
			sc->miicfg &= ~MIICFG_694_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		break;
	case EPIC_LXT970_PHY:
		/* We have to powerup fiber tranceivers */
		cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX)
			cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
		else
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);

		break;
	case EPIC_SERIAL:
		/* Select serial PHY, (10base2/BNC usually) */
		sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		/* There is no driver to fill this */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		/* We need to call this manually as it wasn't called
		 * in mii_mediachg()
		 */
		epic_miibus_statchg(sc->dev);

		break;
	default:
		device_printf(sc->dev, "ERROR! Unknown PHY selected\n");
		return (EINVAL);
	}

	return(0);
}

/*
 * Report current media status.
 */
static void
epic_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;

	/* Nothing should be selected if interface is down */
	if ((ifp->if_flags & IFF_UP) == 0) {
		ifmr->ifm_active = IFM_NONE;
		ifmr->ifm_status = 0;

		return;
	}

	/* Call underlying pollstat, if not serial PHY */
	/* (the serial pseudo-PHY has no MII driver to poll) */
	if (sc->phyid != EPIC_SERIAL)
		mii_pollstat(mii);

	/* Simply copy media info */
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Callback routine, called on media change.
 */
static void
epic_miibus_statchg(dev)
	device_t dev;
{
	epic_softc_t *sc;
	struct mii_data *mii;
	int media;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->miibus);
	media = mii->mii_media_active;

	sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);

	/* If we are in full-duplex mode or loopback operation,
	 * we need to decouple receiver and transmitter.
	 */
	if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
		sc->txcon |= TXCON_FULL_DUPLEX;

	/* On some cards we need manualy set fullduplex led */
	if (sc->cardid == SMC9432FTX ||
	    sc->cardid == SMC9432FTX_SC) {
		if (IFM_OPTIONS(media) & IFM_FDX)
			sc->miicfg |= MIICFG_694_ENABLE;
		else
			sc->miicfg &= ~MIICFG_694_ENABLE;

		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
	}

	/* Update baudrate */
	if (IFM_SUBTYPE(media) == IFM_100_TX ||
	    IFM_SUBTYPE(media) == IFM_100_FX)
		sc->sc_if.if_baudrate = 100000000;
	else
		sc->sc_if.if_baudrate = 10000000;

	/* TXCON can only be written while the chip is idle. */
	epic_stop_activity(sc);
	epic_set_tx_mode(sc);
	epic_start_activity(sc);

	return;
}

/*
 * MII media-init callback: register the chip's serial (10base2/BNC)
 * interface as an additional media instance when the hardware has one.
 */
static void
epic_miibus_mediainit(dev)
	device_t dev;
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	int media;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;

	/* Add Serial Media Interface if present, this applies to
	 * SMC9432BTX serie
	 */
	if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
		/* Store its instance */
		sc->serinst = mii->mii_instance++;

		/* Add as 10base2/BNC media */
		media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
		ifmedia_add(ifm, media, 0, NULL);

		/* Report to user */
		device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n");
	}

	return;
}

/*
 * Reset chip,
 * allocate rings, and update media.
 */
static int
epic_init(sc)
	epic_softc_t *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	int s,i;

	s = splimp();

	/* If interface is already running, then we need not do anything */
	if (ifp->if_flags & IFF_RUNNING) {
		splx(s);
		return 0;
	}

	/* Soft reset the chip (we have to power up card before) */
	CSR_WRITE_4(sc, GENCTL, 0);
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);

	/*
	 * Reset takes 15 pci ticks which depends on PCI bus speed.
	 * Assuming it >= 33000000 hz, we have wait at least 495e-6 sec.
	 */
	DELAY(500);

	/* Wake up */
	CSR_WRITE_4(sc, GENCTL, 0);

	/* Workaround for Application Note 7-15 */
	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

	/* Initialize rings */
	if (epic_init_rings(sc)) {
		device_printf(sc->dev, "failed to init rings\n");
		splx(s);
		return -1;
	}

	/* Give rings to EPIC */
	/* (the chip DMAs descriptors itself, so it needs physical addresses) */
	CSR_WRITE_4(sc, PRCDAR, vtophys(sc->rx_desc));
	CSR_WRITE_4(sc, PTCDAR, vtophys(sc->tx_desc));

	/* Put node address to EPIC */
	CSR_WRITE_4(sc, LAN0, ((u_int16_t *)sc->sc_macaddr)[0]);
	CSR_WRITE_4(sc, LAN1, ((u_int16_t *)sc->sc_macaddr)[1]);
	CSR_WRITE_4(sc, LAN2, ((u_int16_t *)sc->sc_macaddr)[2]);

	/* Set tx mode, includeing transmit threshold */
	epic_set_tx_mode(sc);

	/* Compute and set RXCON. */
	epic_set_rx_mode(sc);

	/* Set multicast table */
	epic_set_mc_table(sc);

	/* Enable interrupts by setting the interrupt mask. */
	CSR_WRITE_4(sc, INTMASK,
	    INTSTAT_RCC | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
	    /* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
	    INTSTAT_FATAL);

	/* Acknowledge all pending interrupts */
	CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));

	/* Enable interrupts, set for PCI read multiple and etc */
	CSR_WRITE_4(sc, GENCTL,
	    GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
	    GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);

	/* Mark interface running ... */
	if (ifp->if_flags & IFF_UP) ifp->if_flags |= IFF_RUNNING;
	else ifp->if_flags &= ~IFF_RUNNING;

	/* ... and free */
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start Rx process */
	epic_start_activity(sc);

	/* Set appropriate media */
	epic_ifmedia_upd(ifp);

	/* Arm the once-per-second MII tick. */
	sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);

	splx(s);

	return 0;
}

/*
 * Synopsis: calculate and set Rx mode. Chip must be in idle state to
 * access RXCON.
 */
static void
epic_set_rx_mode(sc)
	epic_softc_t *sc;
{
	u_int32_t flags = sc->sc_if.if_flags;
	u_int32_t rxcon = RXCON_DEFAULT;

#if defined(EPIC_EARLY_RX)
	rxcon |= RXCON_EARLY_RX;
#endif

	rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;

	CSR_WRITE_4(sc, RXCON, rxcon);

	return;
}

/*
 * Synopsis: Set transmit control register. Chip must be in idle state to
 * access TXCON.
 */
static void
epic_set_tx_mode(sc)
	epic_softc_t *sc;
{
	/* The early-transmit threshold only matters when early TX is on. */
	if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
		CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);

	CSR_WRITE_4(sc, TXCON, sc->txcon);
}

/*
 * Synopsis: Program multicast filter honoring IFF_ALLMULTI and IFF_PROMISC
 * flags.
(Note, that setting PROMISC bit in EPIC's RXCON will only touch 1256 * individual frames, multicast filter must be manually programmed) 1257 * 1258 * Note: EPIC must be in idle state. 1259 */ 1260 static void 1261 epic_set_mc_table(sc) 1262 epic_softc_t *sc; 1263 { 1264 struct ifnet *ifp = &sc->sc_if; 1265 struct ifmultiaddr *ifma; 1266 u_int16_t filter[4]; 1267 u_int8_t h; 1268 1269 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 1270 CSR_WRITE_4(sc, MC0, 0xFFFF); 1271 CSR_WRITE_4(sc, MC1, 0xFFFF); 1272 CSR_WRITE_4(sc, MC2, 0xFFFF); 1273 CSR_WRITE_4(sc, MC3, 0xFFFF); 1274 1275 return; 1276 } 1277 1278 filter[0] = 0; 1279 filter[1] = 0; 1280 filter[2] = 0; 1281 filter[3] = 0; 1282 1283 #if defined(__DragonFly__) || __FreeBSD_version < 500000 1284 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1285 #else 1286 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1287 #endif 1288 if (ifma->ifma_addr->sa_family != AF_LINK) 1289 continue; 1290 h = epic_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1291 filter[h >> 4] |= 1 << (h & 0xF); 1292 } 1293 1294 CSR_WRITE_4(sc, MC0, filter[0]); 1295 CSR_WRITE_4(sc, MC1, filter[1]); 1296 CSR_WRITE_4(sc, MC2, filter[2]); 1297 CSR_WRITE_4(sc, MC3, filter[3]); 1298 1299 return; 1300 } 1301 1302 /* 1303 * Synopsis: calculate EPIC's hash of multicast address. 1304 */ 1305 static u_int8_t 1306 epic_calchash(addr) 1307 caddr_t addr; 1308 { 1309 u_int32_t crc, carry; 1310 int i, j; 1311 u_int8_t c; 1312 1313 /* Compute CRC for the address value. */ 1314 crc = 0xFFFFFFFF; /* initial value */ 1315 1316 for (i = 0; i < 6; i++) { 1317 c = *(addr + i); 1318 for (j = 0; j < 8; j++) { 1319 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 1320 crc <<= 1; 1321 c >>= 1; 1322 if (carry) 1323 crc = (crc ^ 0x04c11db6) | carry; 1324 } 1325 } 1326 1327 return ((crc >> 26) & 0x3F); 1328 } 1329 1330 1331 /* 1332 * Synopsis: Start receive process and transmit one, if they need. 
 */
static void
epic_start_activity(sc)
	epic_softc_t *sc;
{
	/*
	 * Start rx process; re-queue the Tx engine too, but only when
	 * there are transmissions still pending completion.
	 */
	CSR_WRITE_4(sc, COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX |
	    (sc->pending_txs?COMMAND_TXQUEUED:0));
}

/*
 * Synopsis: Completely stop Rx and Tx processes. If TQE is set additional
 * packet needs to be queued to stop Tx DMA.
 */
static void
epic_stop_activity(sc)
	epic_softc_t *sc;
{
	int status, i;

	/* Stop Tx and Rx DMA */
	CSR_WRITE_4(sc, COMMAND,
	    COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);

	/* Wait Rx and Tx DMA to stop; bounded poll of ~0x1000 us (why 1 ms ??? XXX) */
	for (i=0; i<0x1000; i++) {
		status = CSR_READ_4(sc, INTSTAT) & (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
		if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
			break;
		DELAY(1);
	}

	/* Catch all finished packets */
	epic_rx_done(sc);
	epic_tx_done(sc);

	status = CSR_READ_4(sc, INTSTAT);

	if ((status & INTSTAT_RXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");

	if ((status & INTSTAT_TXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");

	/*
	 * May need to queue one more packet if TQE (Tx queue empty but
	 * engine not idle); this is rare but existing case.
	 */
	if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
		(void) epic_queue_last_packet(sc);

}

/*
 * The EPIC transmitter may get stuck in TQE state. It will not go IDLE until
 * a packet from the current descriptor is copied to internal RAM. We
 * compose a dummy packet here and queue it for transmission.
 *
 * XXX the packet will then be actually sent over network...
1393 */ 1394 static int 1395 epic_queue_last_packet(sc) 1396 epic_softc_t *sc; 1397 { 1398 struct epic_tx_desc *desc; 1399 struct epic_frag_list *flist; 1400 struct epic_tx_buffer *buf; 1401 struct mbuf *m0; 1402 int i; 1403 1404 device_printf(sc->dev, "queue last packet\n"); 1405 1406 desc = sc->tx_desc + sc->cur_tx; 1407 flist = sc->tx_flist + sc->cur_tx; 1408 buf = sc->tx_buffer + sc->cur_tx; 1409 1410 if ((desc->status & 0x8000) || (buf->mbuf != NULL)) 1411 return (EBUSY); 1412 1413 MGETHDR(m0, MB_DONTWAIT, MT_DATA); 1414 if (NULL == m0) 1415 return (ENOBUFS); 1416 1417 /* Prepare mbuf */ 1418 m0->m_len = min(MHLEN, ETHER_MIN_LEN-ETHER_CRC_LEN); 1419 flist->frag[0].fraglen = m0->m_len; 1420 m0->m_pkthdr.len = m0->m_len; 1421 m0->m_pkthdr.rcvif = &sc->sc_if; 1422 bzero(mtod(m0,caddr_t), m0->m_len); 1423 1424 /* Fill fragments list */ 1425 flist->frag[0].fraglen = m0->m_len; 1426 flist->frag[0].fragaddr = vtophys(mtod(m0, caddr_t)); 1427 flist->numfrags = 1; 1428 1429 /* Fill in descriptor */ 1430 buf->mbuf = m0; 1431 sc->pending_txs++; 1432 sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK; 1433 desc->control = 0x01; 1434 desc->txlength = max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN); 1435 desc->status = 0x8000; 1436 1437 /* Launch transmition */ 1438 CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED); 1439 1440 /* Wait Tx DMA to stop (for how long??? XXX) */ 1441 for (i=0; i<1000; i++) { 1442 if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) 1443 break; 1444 DELAY(1); 1445 } 1446 1447 if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0) 1448 device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n"); 1449 else 1450 epic_tx_done(sc); 1451 1452 return 0; 1453 } 1454 1455 /* 1456 * Synopsis: Shut down board and deallocates rings. 
1457 */ 1458 static void 1459 epic_stop(sc) 1460 epic_softc_t *sc; 1461 { 1462 int s; 1463 1464 s = splimp(); 1465 1466 sc->sc_if.if_timer = 0; 1467 1468 untimeout((timeout_t *)epic_stats_update, sc, sc->stat_ch); 1469 1470 /* Disable interrupts */ 1471 CSR_WRITE_4(sc, INTMASK, 0); 1472 CSR_WRITE_4(sc, GENCTL, 0); 1473 1474 /* Try to stop Rx and TX processes */ 1475 epic_stop_activity(sc); 1476 1477 /* Reset chip */ 1478 CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET); 1479 DELAY(1000); 1480 1481 /* Make chip go to bed */ 1482 CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN); 1483 1484 /* Free memory allocated for rings */ 1485 epic_free_rings(sc); 1486 1487 /* Mark as stoped */ 1488 sc->sc_if.if_flags &= ~IFF_RUNNING; 1489 1490 splx(s); 1491 return; 1492 } 1493 1494 /* 1495 * Synopsis: This function should free all memory allocated for rings. 1496 */ 1497 static void 1498 epic_free_rings(sc) 1499 epic_softc_t *sc; 1500 { 1501 int i; 1502 1503 for (i=0; i<RX_RING_SIZE; i++) { 1504 struct epic_rx_buffer *buf = sc->rx_buffer + i; 1505 struct epic_rx_desc *desc = sc->rx_desc + i; 1506 1507 desc->status = 0; 1508 desc->buflength = 0; 1509 desc->bufaddr = 0; 1510 1511 if (buf->mbuf) m_freem(buf->mbuf); 1512 buf->mbuf = NULL; 1513 } 1514 1515 for (i=0; i<TX_RING_SIZE; i++) { 1516 struct epic_tx_buffer *buf = sc->tx_buffer + i; 1517 struct epic_tx_desc *desc = sc->tx_desc + i; 1518 1519 desc->status = 0; 1520 desc->buflength = 0; 1521 desc->bufaddr = 0; 1522 1523 if (buf->mbuf) m_freem(buf->mbuf); 1524 buf->mbuf = NULL; 1525 } 1526 } 1527 1528 /* 1529 * Synopsis: Allocates mbufs for Rx ring and point Rx descs to them. 1530 * Point Tx descs to fragment lists. Check that all descs and fraglists 1531 * are bounded and aligned properly. 
 */
static int
epic_init_rings(sc)
	epic_softc_t *sc;
{
	int i;

	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;		/* Owned by driver */
		desc->next = vtophys(sc->rx_desc + ((i+1) & RX_RING_MASK));

		/* Descriptor must be dword-aligned and not cross a page */
		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return EFAULT;
		}

		EPIC_MGETCLUSTER(buf->mbuf);
		if (NULL == buf->mbuf) {
			epic_free_rings(sc);
			return ENOBUFS;
		}
		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));

		desc->buflength = MCLBYTES;	/* Max RX buffer length */
		desc->status = 0x8000;		/* Set owner bit to NIC */
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct epic_tx_buffer *buf = sc->tx_buffer + i;
		struct epic_tx_desc *desc = sc->tx_desc + i;

		desc->status = 0;		/* Owned by driver */
		desc->next = vtophys(sc->tx_desc + ((i+1) & TX_RING_MASK));

		/* Same alignment/page-crossing checks as on the Rx side */
		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return EFAULT;
		}

		buf->mbuf = NULL;
		desc->bufaddr = vtophys(sc->tx_flist + i);

		/* Fragment list must also be aligned and page-contained */
		if ((desc->bufaddr & 3) ||
		    ((desc->bufaddr & PAGE_MASK) + sizeof(struct epic_frag_list)) > PAGE_SIZE) {
			epic_free_rings(sc);
			return EFAULT;
		}
	}

	return 0;
}

/*
 * EEPROM operation functions (bit-banged through the EECTL register).
 */

/*
 * Write a control value to EECTL, then poll until bit 0x20 clears
 * (presumably the EEPROM-busy indicator — confirm against if_txreg.h).
 * The poll is bounded at 0xFF reads.
 */
static void
epic_write_eepromreg(sc, val)
	epic_softc_t *sc;
	u_int8_t val;
{
	u_int16_t i;

	CSR_WRITE_1(sc, EECTL, val);

	for (i=0; i<0xFF; i++)
		if ((CSR_READ_1(sc, EECTL) & 0x20) == 0) break;

	return;
}

/* Read back the current EECTL register value. */
static u_int8_t
epic_read_eepromreg(sc)
	epic_softc_t *sc;
{
	return CSR_READ_1(sc, EECTL);
}

/*
 * Pulse the EEPROM clock line (bit 0x4) with the given control bits held,
 * returning the EECTL value sampled after the pulse.
 */
static u_int8_t
epic_eeprom_clock(sc, val)
	epic_softc_t *sc;
	u_int8_t val;
{
	epic_write_eepromreg(sc, val);
	epic_write_eepromreg(sc, (val | 0x4));
	epic_write_eepromreg(sc, val);

	return epic_read_eepromreg(sc);
}

/*
 * Shift a 16-bit word out to the EEPROM, MSB first.
 * 0x0B/0x03 select data-high/data-low control patterns respectively.
 */
static void
epic_output_eepromw(sc, val)
	epic_softc_t *sc;
	u_int16_t val;
{
	int i;

	for (i = 0xF; i >= 0; i--) {
		if (val & (1 << i))
			epic_eeprom_clock(sc, 0x0B);
		else
			epic_eeprom_clock(sc, 0x03);
	}
}

/*
 * Shift a 16-bit word in from the EEPROM, MSB first; bit 0x10 of the
 * clocked-back EECTL value carries the data bit.
 */
static u_int16_t
epic_input_eepromw(sc)
	epic_softc_t *sc;
{
	u_int16_t retval = 0;
	int i;

	for (i = 0xF; i >= 0; i--) {
		if (epic_eeprom_clock(sc, 0x3) & 0x10)
			retval |= (1 << i);
	}

	return retval;
}

/*
 * Read one 16-bit word from the serial EEPROM at location 'loc'.
 * The command format depends on the EEPROM size, detected via bit 0x40
 * of EECTL (small parts take 6-bit, large parts 8-bit addresses —
 * presumably; confirm against the EPIC datasheet).
 */
static int
epic_read_eeprom(sc, loc)
	epic_softc_t *sc;
	u_int16_t loc;
{
	u_int16_t dataval;
	u_int16_t read_cmd;

	/* Enable EEPROM access */
	epic_write_eepromreg(sc, 3);

	if (epic_read_eepromreg(sc) & 0x40)
		read_cmd = (loc & 0x3F) | 0x180;
	else
		read_cmd = (loc & 0xFF) | 0x600;

	epic_output_eepromw(sc, read_cmd);

	dataval = epic_input_eepromw(sc);

	/* Deselect the EEPROM again */
	epic_write_eepromreg(sc, 1);

	return dataval;
}

/*
 * Here goes MII read/write routines.
 */

/*
 * Read PHY register 'reg' of PHY 'phy' via the MIICTL/MIIDATA registers.
 * Bit 0x01 of MIICTL starts a read and clears on completion; the poll
 * is bounded at 0x100 iterations of 1us each.
 */
static int
epic_read_phy_reg(sc, phy, reg)
	epic_softc_t *sc;
	int phy, reg;
{
	int i;

	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));

	for (i = 0; i < 0x100; i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0) break;
		DELAY(1);
	}

	/* NOTE(review): returns whatever MIIDATA holds even if the poll
	 * above timed out — callers get stale data in that case. */
	return (CSR_READ_4(sc, MIIDATA));
}

/*
 * Write 'val' to PHY register 'reg' of PHY 'phy'. Bit 0x02 of MIICTL
 * starts a write and clears on completion; bounded poll as above.
 */
static void
epic_write_phy_reg(sc, phy, reg, val)
	epic_softc_t *sc;
	int phy, reg, val;
{
	int i;

	CSR_WRITE_4(sc, MIIDATA, val);
	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));

	for(i=0;i<0x100;i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0) break;
		DELAY(1);
	}

	return;
}

/* miibus glue: read a PHY register on behalf of the MII layer. */
static int
epic_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	return (PHY_READ_2(sc, phy, reg));
}

/* miibus glue: write a PHY register on behalf of the MII layer. */
static int
epic_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	PHY_WRITE_2(sc, phy, reg, data);

	return (0);
}