/*	$OpenBSD: smc83c170.c,v 1.19 2015/03/14 03:38:47 jsg Exp $	*/
/*	$NetBSD: smc83c170.c,v 1.59 2005/02/27 00:27:02 perry Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Standard Microsystems Corp. 83C170
 * Ethernet PCI Integrated Controller (EPIC/100).
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>
#include <dev/mii/lxtphyreg.h>

#include <dev/ic/smc83c170reg.h>
#include <dev/ic/smc83c170var.h>

void	epic_start(struct ifnet *);
void	epic_watchdog(struct ifnet *);
int	epic_ioctl(struct ifnet *, u_long, caddr_t);
int	epic_init(struct ifnet *);
void	epic_stop(struct ifnet *, int);

void	epic_reset(struct epic_softc *);
void	epic_rxdrain(struct epic_softc *);
int	epic_add_rxbuf(struct epic_softc *, int);
void	epic_read_eeprom(struct epic_softc *, int, int, u_int16_t *);
void	epic_set_mchash(struct epic_softc *);
void	epic_fixup_clock_source(struct epic_softc *);
int	epic_mii_read(struct device *, int, int);
void	epic_mii_write(struct device *, int, int, int);
int	epic_mii_wait(struct epic_softc *, u_int32_t);
void	epic_tick(void *);

void	epic_statchg(struct device *);
int	epic_mediachange(struct ifnet *);
void	epic_mediastatus(struct ifnet *, struct ifmediareq *);

struct cfdriver epic_cd = {
	0, "epic", DV_IFNET
};

#define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)

int	epic_copy_small = 0;

#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Attach an EPIC interface to the system.
 */
void
epic_attach(struct epic_softc *sc, const char *intrstr)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rseg, error, miiflags;
	u_int i;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
	char *nullbuf;

	timeout_set(&sc->sc_mii_timeout, epic_tick, sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
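	 * An extra ETHER_PAD_LEN bytes are allocated at the end of the
	 * control data and used as an all-zero pad buffer for padding
	 * short frames out to the minimum Ethernet length (see the pad
	 * fragment appended in epic_start()).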
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN,
	    (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf =
	    (char *)sc->sc_control_data + sizeof(struct epic_control_data);
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		printf(": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea) / sizeof(myea[0]); i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name.
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2] = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	devname[sizeof(devname) - 1] = ' ';
	for (i = sizeof(devname) - 1; devname[i] == ' '; i--) {
		devname[i] = '\0';
		if (i == 0)
			break;
	}

	printf(", %s : %s, address %s\n", devname, intrstr,
	    ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
		    sc->sc_serinst),
		    0, NULL);
	} else
		sc->sc_serinst = -1;

	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, EPIC_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 */
void
epic_start(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;
	u_int len;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
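	 * Ownership of that first descriptor is deliberately withheld
	 * until the whole chain has been built; it is handed to the
	 * EPIC at the bottom of this routine.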
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == EPIC_NFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error)
				break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the fraglist. */
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}
		len = m0->m_pkthdr.len;
		if (len < ETHER_PAD_LEN) {
			fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
			fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len;
			len = ETHER_PAD_LEN;
			seg++;
		}
		fr->ef_nfrags = seg;

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = TXSTAT_TXLENGTH(len);
		else
			txd->et_txstatus =
			    TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Watchdog timer handler.
 * [ifnet interface function]
 */
void
epic_watchdog(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) epic_init(ifp);
}

/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 */
int
epic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
		case AF_INET:
			epic_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
		default:
			epic_init(ifp);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		/*
		 * If the interface is marked up and not running, then
		 * start it.  If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it.  This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP)
			epic_init(ifp);
		else if (ifp->if_flags & IFF_RUNNING)
			epic_stop(ifp, 1);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Interrupt handler.
 */
int
epic_intr(void *arg)
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat, rxstatus, txstatus;
	int i, claimed = 0;
	u_int len;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
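	 * The receive ring is walked from sc_rxptr until a descriptor
	 * still owned by the chip is found; each completed frame is
	 * either copied into a fresh header mbuf (epic_copy_small) or
	 * handed up whole with a new cluster swapped into the ring.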
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			rxstatus = rxd->er_rxstatus;
			if (rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if it's for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

			/* Pass it on. */
			ether_input_mbuf(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
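	 * Completed descriptors are reclaimed from sc_txdirty forward
	 * until one still owned by the EPIC is found; each reclaimed
	 * mbuf is unloaded and freed, and the error and collision
	 * counters are updated from its transmit status word.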
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		    i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			txstatus = txd->et_txstatus;
			if (txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txstatus);
			if (txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}

/*
 * One second timer, used to tick the MII.
 */
void
epic_tick(void *arg)
{
	struct epic_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_mii_timeout, 1);
}

/*
 * Fix up the clock source on the EPIC.
 */
void
epic_fixup_clock_source(struct epic_softc *sc)
{
	int i;

	/*
	 * According to SMC Application Note 7-15, the EPIC's clock
	 * source is incorrect following a reset.  This manifests itself
	 * as failure to recognize when host software has written to
	 * a register on the EPIC.  The appnote recommends issuing at
	 * least 16 consecutive writes to the CLOCK TEST bit to correctly
	 * configure the clock source.
	 */
	for (i = 0; i < 16; i++)
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
		    TEST_CLOCKTEST);
}

/*
 * Perform a soft reset on the EPIC.
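 * The clock source is fixed up both before and after the reset, since
 * the reset itself reintroduces the clock-source problem described in
 * epic_fixup_clock_source() above.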
 */
void
epic_reset(struct epic_softc *sc)
{

	epic_fixup_clock_source(sc);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}

/*
 * Initialize the interface.  Must be called at splnet().
 */
int
epic_init(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.
	 */
	reg0 = sc->sc_arpcom.ac_enaddr[1] << 8 | sc->sc_arpcom.ac_enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = sc->sc_arpcom.ac_enaddr[3] << 8 | sc->sc_arpcom.ac_enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = sc->sc_arpcom.ac_enaddr[5] << 8 | sc->sc_arpcom.ac_enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	epic_mediachange(ifp);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	timeout_add_sec(&sc->sc_mii_timeout, 1);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * Drain the receive queue.
 */
void
epic_rxdrain(struct epic_softc *sc)
{
	struct epic_descsoft *ds;
	int i;

	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop transmission on the interface.
 */
void
epic_stop(struct ifnet *ifp, int disable)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	timeout_del(&sc->sc_mii_timeout);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);
}

/*
 * Read the EPIC Serial EEPROM.
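 * The EEPROM is accessed by bit-banging the EECTL register: for each
 * word, chip select is asserted, the three-bit READ opcode and a
 * six-bit word address are shifted in MSB first, and the sixteen data
 * bits are then clocked back out.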
 */
void
epic_read_eeprom(struct epic_softc *sc, int word, int wordcnt, u_int16_t *data)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
epic_add_rxbuf(struct epic_softc *sc, int idx)
{
	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("epic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EPIC_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * Set the EPIC multicast hash table.
 *
 * NOTE: We rely on a recently-updated mii_media_active here!
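 *
 * The CRC of each address is shifted right by 26, leaving a 6-bit
 * value; bits 5-4 select one of the four 16-bit hash registers
 * (MC0-MC3) and bits 3-0 select the bit within that register.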
 */
void
epic_set_mchash(struct epic_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hash, mchash[4];

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bits select the register, while the
	 * rest of the bits select the bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
		/* XXX hardware bug in 10Mbps mode. */
		goto allmulti;
	}

	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		hash >>= 26;

		/* Set the corresponding bit in the hash table. */
		mchash[hash >> 4] |= 1 << (hash & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

 sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}

/*
 * Wait for the MII to become ready.
 */
int
epic_mii_wait(struct epic_softc *sc, u_int32_t rw)
{
	int i;

	for (i = 0; i < 50; i++) {
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
		    == 0)
			break;
		delay(2);
	}
	if (i == 50) {
		printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
		return (1);
	}

	return (0);
}

/*
 * Read from the MII.
 */
int
epic_mii_read(struct device *self, int phy, int reg)
{
	struct epic_softc *sc = (struct epic_softc *)self;

	if (epic_mii_wait(sc, MMCTL_WRITE))
		return (0);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_READ));

	if (epic_mii_wait(sc, MMCTL_READ))
		return (0);

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
	    MMDATA_MASK);
}

/*
 * Write to the MII.
 */
void
epic_mii_write(struct device *self, int phy, int reg, int val)
{
	struct epic_softc *sc = (struct epic_softc *)self;

	if (epic_mii_wait(sc, MMCTL_WRITE))
		return;

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_WRITE));
}

/*
 * Callback from PHY when media changes.
 */
void
epic_statchg(struct device *self)
{
	struct epic_softc *sc = (struct epic_softc *)self;
	u_int32_t txcon, miicfg;

	/*
	 * Update loopback bits in TXCON to reflect duplex mode.
	 */
	txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	else
		txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);

	/* On some cards we must set the full-duplex LED manually. */
	if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
			miicfg |= MIICFG_ENABLE;
		else
			miicfg &= ~MIICFG_ENABLE;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	/*
	 * There is a multicast filter bug in 10Mbps mode.  Kick the
	 * multicast filter in case the speed changed.
	 */
	epic_set_mchash(sc);
}

/*
 * Callback from ifmedia to request current media status.
 */
void
epic_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct epic_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * Callback from ifmedia to request a new media setting.
 */
int
epic_mediachange(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia *ifm = &mii->mii_media;
	int media = ifm->ifm_cur->ifm_media;
	u_int32_t miicfg;
	struct mii_softc *miisc;
	int cfg;

	if (!(ifp->if_flags & IFF_UP))
		return (0);

	if (IFM_INST(media) != sc->sc_serinst) {
		/* If we're not selecting serial interface, select MII mode */
#ifdef EPICMEDIADEBUG
		printf("%s: parallel mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg &= ~MIICFG_SERMODEENA;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	mii_mediachg(mii);

	if (IFM_INST(media) == sc->sc_serinst) {
		/* select serial interface */
#ifdef EPICMEDIADEBUG
		printf("%s: serial mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);

		/* There is no PHY driver to fill these in. */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		epic_statchg(&sc->sc_dev);
		return (0);
	}

	/* Look up the selected PHY. */
	for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
	    miisc = LIST_NEXT(miisc, mii_list)) {
		if (IFM_INST(media) == miisc->mii_inst)
			break;
	}
	if (!miisc) {
		printf("epic_mediachange: can't happen\n");	/* ??? panic */
		return (0);
	}
#ifdef EPICMEDIADEBUG
	printf("%s: using phy %s\n", ifp->if_xname,
	    miisc->mii_dev.dv_xname);
#endif

	if (miisc->mii_flags & MIIF_HAVEFIBER) {
		/* XXX XXX assume it's a Level1 - should check */

		/* We have to power up fiber transceivers. */
		cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
#ifdef EPICMEDIADEBUG
			printf("%s: power up fiber\n", ifp->if_xname);
#endif
			cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
		} else {
#ifdef EPICMEDIADEBUG
			printf("%s: power down fiber\n", ifp->if_xname);
#endif
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		}
		PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
	}

	return (0);
}