1 /* 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 * 32 * $FreeBSD: src/sys/pci/if_ste.c,v 1.14.2.9 2003/02/05 22:03:57 mbr Exp $ 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/sockio.h> 38 #include <sys/mbuf.h> 39 #include <sys/malloc.h> 40 #include <sys/kernel.h> 41 #include <sys/socket.h> 42 #include <sys/serialize.h> 43 #include <sys/bus.h> 44 #include <sys/rman.h> 45 #include <sys/thread2.h> 46 #include <sys/interrupt.h> 47 48 #include <net/if.h> 49 #include <net/ifq_var.h> 50 #include <net/if_arp.h> 51 #include <net/ethernet.h> 52 #include <net/if_dl.h> 53 #include <net/if_media.h> 54 #include <net/vlan/if_vlan_var.h> 55 56 #include <net/bpf.h> 57 58 #include <vm/vm.h> /* for vtophys */ 59 #include <vm/pmap.h> /* for vtophys */ 60 61 #include "../mii_layer/mii.h" 62 #include "../mii_layer/miivar.h" 63 64 #include "pcidevs.h" 65 #include <bus/pci/pcireg.h> 66 #include <bus/pci/pcivar.h> 67 68 /* "controller miibus0" required. See GENERIC if you get errors here. */ 69 #include "miibus_if.h" 70 71 #define STE_USEIOSPACE 72 73 #include "if_stereg.h" 74 75 /* 76 * Various supported device vendors/types and their names. 
 */
/*
 * Table of PCI vendor/device IDs this driver supports, matched
 * against the probed device in ste_probe().  Terminated by an
 * all-zero sentinel entry.
 */
static struct ste_type ste_devs[] = {
	{ PCI_VENDOR_SUNDANCETI, PCI_PRODUCT_SUNDANCETI_ST201,
	  "Sundance ST201 10/100BaseTX" },
	{ PCI_VENDOR_SUNDANCETI, PCI_PRODUCT_SUNDANCETI_ST201_0,
	  "Sundance ST201 10/100BaseTX" },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DL1002,
	  "D-Link DFE-550TX 10/100BaseTX" },
	{ 0, 0, NULL }
};

/* Forward declarations for driver entry points and local helpers. */
static int	ste_probe	(device_t);
static int	ste_attach	(device_t);
static int	ste_detach	(device_t);
static void	ste_init	(void *);
static void	ste_intr	(void *);
static void	ste_rxeof	(struct ste_softc *);
static void	ste_txeoc	(struct ste_softc *);
static void	ste_txeof	(struct ste_softc *);
static void	ste_stats_update	(void *);
static void	ste_stop	(struct ste_softc *);
static void	ste_reset	(struct ste_softc *);
static int	ste_ioctl	(struct ifnet *, u_long, caddr_t,
					struct ucred *);
static int	ste_encap	(struct ste_softc *, struct ste_chain *,
					struct mbuf *);
static void	ste_start	(struct ifnet *, struct ifaltq_subque *);
static void	ste_watchdog	(struct ifnet *);
static void	ste_shutdown	(device_t);
static int	ste_newbuf	(struct ste_softc *,
					struct ste_chain_onefrag *,
					struct mbuf *);
static int	ste_ifmedia_upd	(struct ifnet *);
static void	ste_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

/* Software bit-banged MII access (the ST201 has no MII shortcut regs). */
static void	ste_mii_sync	(struct ste_softc *);
static void	ste_mii_send	(struct ste_softc *, u_int32_t, int);
static int	ste_mii_readreg	(struct ste_softc *,
					struct ste_mii_frame *);
static int	ste_mii_writereg	(struct ste_softc *,
					struct ste_mii_frame *);
static int	ste_miibus_readreg	(device_t, int, int);
static int	ste_miibus_writereg	(device_t, int, int, int);
static void	ste_miibus_statchg	(device_t);

static int	ste_eeprom_wait	(struct ste_softc *);
static int	ste_read_eeprom	(struct ste_softc *, caddr_t, int,
					int, int);
static void	ste_wait	(struct ste_softc *);
static void	ste_setmulti	(struct ste_softc *);
static int	ste_init_rx_list	(struct ste_softc *);
static void	ste_init_tx_list	(struct ste_softc *);

/* Select I/O-port vs. memory-mapped register access at compile time. */
#ifdef STE_USEIOSPACE
#define STE_RES			SYS_RES_IOPORT
#define STE_RID			STE_PCI_LOIO
#else
#define STE_RES			SYS_RES_MEMORY
#define STE_RID			STE_PCI_LOMEM
#endif

static device_method_t ste_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ste_probe),
	DEVMETHOD(device_attach,	ste_attach),
	DEVMETHOD(device_detach,	ste_detach),
	DEVMETHOD(device_shutdown,	ste_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ste_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ste_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ste_miibus_statchg),

	DEVMETHOD_END
};

static driver_t ste_driver = {
	"ste",
	ste_methods,
	sizeof(struct ste_softc)
};

static devclass_t ste_devclass;

DECLARE_DUMMY_MODULE(if_ste);
DRIVER_MODULE(if_ste, pci, ste_driver, ste_devclass, NULL, NULL);
DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, NULL, NULL);

/* Read-modify-write helpers for 32/16/8 bit CSR accesses. */
#define STE_SETBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define STE_CLRBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define STE_SETBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)

#define STE_CLRBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)

#define STE_SETBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)

#define STE_CLRBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)


/* Shorthand for toggling bits in the PHY control register; these
 * assume a local `sc` is in scope at every use site. */
#define MII_SET(x)	STE_SETBIT1(sc, STE_PHYCTL, x)
#define MII_CLR(x)	STE_CLRBIT1(sc, STE_PHYCTL, x)

/*
 * Sync the PHYs by setting data bit and strobing the
 * clock 32 times.
 */
static void
ste_mii_sync(struct ste_softc *sc)
{
	int i;

	/* Drive MDIO high while clocking to resynchronize the PHY(s). */
	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);

	for (i = 0; i < 32; i++) {
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.  `bits` is sent MSB-first,
 * `cnt` is the number of bits to transmit.
 */
static void
ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	MII_CLR(STE_PHYCTL_MCLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(STE_PHYCTL_MDATA);
		} else {
			MII_CLR(STE_PHYCTL_MDATA);
		}
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_SET(STE_PHYCTL_MCLK);
	}
}

/*
 * Read an PHY register through the MII.
 * Bit-bangs a standard read frame; returns 1 if the PHY failed to
 * ack (frame->mii_data left 0), 0 on success.
 */
static int
ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, STE_PHYCTL, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(STE_PHYCTL_MDIR);

	/* Idle bit */
	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/* Check for ack: PHY pulls MDIO low during the turnaround. */
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(STE_PHYCTL_MCLK);
			DELAY(1);
			MII_SET(STE_PHYCTL_MCLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
	}

fail:

	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.  Always returns 0; write
 * frames carry no ack we can check.
 */
static int
ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
{
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_WRITEOP;
	frame->mii_turnaround = STE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);
	ste_mii_send(sc, frame->mii_turnaround, 2);
	ste_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(STE_PHYCTL_MDIR);

	return(0);
}

/*
 * miibus read method.  On single-PHY boards (ste_one_phy set) only
 * PHY address 0 is serviced, since the chip mirrors one PHY at
 * multiple addresses.
 */
static int
ste_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ste_softc *sc;
	struct ste_mii_frame frame;

	sc = device_get_softc(dev);

	if ( sc->ste_one_phy && phy != 0 )
		return (0);

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	ste_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/*
 * miibus write method.
 */
static int
ste_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ste_softc *sc;
	struct ste_mii_frame frame;

	sc = device_get_softc(dev);
	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	ste_mii_writereg(sc, &frame);

	return(0);
}

/*
 * miibus status-change method: propagate the negotiated duplex into
 * MACCTL0, then reset the RX/TX engines so the MAC picks it up.
 */
static void
ste_miibus_statchg(device_t dev)
{
	struct ste_softc *sc;
	struct mii_data *mii;
	int i;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->ste_miibus);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
	} else {
		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
	}

	STE_SETBIT4(sc, STE_ASICCTL,STE_ASICCTL_RX_RESET |
	    STE_ASICCTL_TX_RESET);
	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
	}
	if (i == STE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "rx reset never completed\n");

	return;
}

/*
 * ifmedia "set media" callback: reset all PHYs and kick off a fresh
 * autonegotiation/media change.  Link state is re-learned by
 * ste_stats_update().
 */
static int
ste_ifmedia_upd(struct ifnet *ifp)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->ste_miibus);
	sc->ste_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * ifmedia "get media status" callback.
 */
static void
ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->ste_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Spin until a previously issued DMA stall/unstall command has
 * completed (bounded by STE_TIMEOUT iterations).
 */
static void
ste_wait(struct ste_softc *sc)
{
	int i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
			break;
	}

	if (i == STE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "command never completed!\n");

	return;
}

/*
 * The EEPROM is slow: give it time to come ready after issuing
 * it a command.  Returns 1 on timeout, 0 when ready.
 */
static int
ste_eeprom_wait(struct ste_softc *sc)
{
	int i;

	DELAY(1000);

	for (i = 0; i < 100; i++) {
		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
			DELAY(1000);
		else
			break;
	}

	if (i == 100) {
		if_printf(&sc->arpcom.ac_if, "eeprom failed to come ready\n");
		return(1);
	}

	return(0);
}

/*
 * Read a sequence of words from the EEPROM. Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 * `dest` receives `cnt` 16-bit words starting at word offset `off`;
 * if `swap` is set each word is byte-swapped from network order.
 * Returns 1 on EEPROM timeout, 0 on success.
 */
static int
ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int err = 0, i;
	u_int16_t word = 0, *ptr;

	if (ste_eeprom_wait(sc))
		return(1);

	for (i = 0; i < cnt; i++) {
		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
		err = ste_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, STE_EEPROM_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return(err ? 1 : 0);
}

/*
 * Program the 64-bit multicast hash filter from the interface's
 * multicast address list, or fall back to receive-all-multicast when
 * IFF_ALLMULTI/IFF_PROMISC is set.
 */
static void
ste_setmulti(struct ste_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;

	ifp = &sc->arpcom.ac_if;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_2(sc, STE_MAR0, 0);
	CSR_WRITE_2(sc, STE_MAR1, 0);
	CSR_WRITE_2(sc, STE_MAR2, 0);
	CSR_WRITE_2(sc, STE_MAR3, 0);

	/* now program new ones: low 6 bits of the big-endian CRC
	 * select one of 64 hash-table bits */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x3f;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
	STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);

	return;
}

/*
 * Interrupt handler.  Runs with ifp->if_serializer held (registered
 * that way in ste_attach()).
 */
static void
ste_intr(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* See if this is really our interrupt.
*/ 590 if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) 591 return; 592 593 for (;;) { 594 status = CSR_READ_2(sc, STE_ISR_ACK); 595 596 if (!(status & STE_INTRS)) 597 break; 598 599 if (status & STE_ISR_RX_DMADONE) 600 ste_rxeof(sc); 601 602 if (status & STE_ISR_TX_DMADONE) 603 ste_txeof(sc); 604 605 if (status & STE_ISR_TX_DONE) 606 ste_txeoc(sc); 607 608 if (status & STE_ISR_STATS_OFLOW) { 609 callout_stop(&sc->ste_stat_timer); 610 ste_stats_update(sc); 611 } 612 613 if (status & STE_ISR_LINKEVENT) 614 mii_pollstat(device_get_softc(sc->ste_miibus)); 615 616 if (status & STE_ISR_HOSTERR) { 617 ste_reset(sc); 618 ste_init(sc); 619 } 620 } 621 622 /* Re-enable interrupts */ 623 CSR_WRITE_2(sc, STE_IMR, STE_INTRS); 624 625 if (!ifq_is_empty(&ifp->if_snd)) 626 if_devstart(ifp); 627 } 628 629 /* 630 * A frame has been uploaded: pass the resulting mbuf chain up to 631 * the higher level protocols. 632 */ 633 static void 634 ste_rxeof(struct ste_softc *sc) 635 { 636 struct mbuf *m; 637 struct ifnet *ifp; 638 struct ste_chain_onefrag *cur_rx; 639 int total_len = 0, count=0; 640 u_int32_t rxstat; 641 642 ifp = &sc->arpcom.ac_if; 643 644 while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status) 645 & STE_RXSTAT_DMADONE) { 646 if ((STE_RX_LIST_CNT - count) < 3) { 647 break; 648 } 649 650 cur_rx = sc->ste_cdata.ste_rx_head; 651 sc->ste_cdata.ste_rx_head = cur_rx->ste_next; 652 653 /* 654 * If an error occurs, update stats, clear the 655 * status word and leave the mbuf cluster in place: 656 * it should simply get re-used next time this descriptor 657 * comes up in the ring. 658 */ 659 if (rxstat & STE_RXSTAT_FRAME_ERR) { 660 IFNET_STAT_INC(ifp, ierrors, 1); 661 cur_rx->ste_ptr->ste_status = 0; 662 continue; 663 } 664 665 /* 666 * If there error bit was not set, the upload complete 667 * bit should be set which means we have a valid packet. 668 * If not, something truly strange has happened. 
669 */ 670 if (!(rxstat & STE_RXSTAT_DMADONE)) { 671 if_printf(ifp, "bad receive status -- packet dropped"); 672 IFNET_STAT_INC(ifp, ierrors, 1); 673 cur_rx->ste_ptr->ste_status = 0; 674 continue; 675 } 676 677 /* No errors; receive the packet. */ 678 m = cur_rx->ste_mbuf; 679 total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN; 680 681 /* 682 * Try to conjure up a new mbuf cluster. If that 683 * fails, it means we have an out of memory condition and 684 * should leave the buffer in place and continue. This will 685 * result in a lost packet, but there's little else we 686 * can do in this situation. 687 */ 688 if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) { 689 IFNET_STAT_INC(ifp, ierrors, 1); 690 cur_rx->ste_ptr->ste_status = 0; 691 continue; 692 } 693 694 IFNET_STAT_INC(ifp, ipackets, 1); 695 m->m_pkthdr.rcvif = ifp; 696 m->m_pkthdr.len = m->m_len = total_len; 697 698 ifp->if_input(ifp, m, NULL, -1); 699 700 cur_rx->ste_ptr->ste_status = 0; 701 count++; 702 } 703 704 return; 705 } 706 707 static void 708 ste_txeoc(struct ste_softc *sc) 709 { 710 u_int8_t txstat; 711 struct ifnet *ifp; 712 713 ifp = &sc->arpcom.ac_if; 714 715 while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) & 716 STE_TXSTATUS_TXDONE) { 717 if (txstat & STE_TXSTATUS_UNDERRUN || 718 txstat & STE_TXSTATUS_EXCESSCOLLS || 719 txstat & STE_TXSTATUS_RECLAIMERR) { 720 IFNET_STAT_INC(ifp, oerrors, 1); 721 if_printf(ifp, "transmission error: %x\n", txstat); 722 723 ste_reset(sc); 724 ste_init(sc); 725 726 if (txstat & STE_TXSTATUS_UNDERRUN && 727 sc->ste_tx_thresh < STE_PACKET_SIZE) { 728 sc->ste_tx_thresh += STE_MIN_FRAMELEN; 729 if_printf(ifp, "tx underrun, increasing tx" 730 " start threshold to %d bytes\n", 731 sc->ste_tx_thresh); 732 } 733 CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh); 734 CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH, 735 (STE_PACKET_SIZE >> 4)); 736 } 737 ste_init(sc); 738 CSR_WRITE_2(sc, STE_TX_STATUS, txstat); 739 } 740 741 return; 742 } 743 744 static void 745 
ste_txeof(struct ste_softc *sc) 746 { 747 struct ste_chain *cur_tx = NULL; 748 struct ifnet *ifp; 749 int idx; 750 751 ifp = &sc->arpcom.ac_if; 752 753 idx = sc->ste_cdata.ste_tx_cons; 754 while(idx != sc->ste_cdata.ste_tx_prod) { 755 cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; 756 757 if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE)) 758 break; 759 760 if (cur_tx->ste_mbuf != NULL) { 761 m_freem(cur_tx->ste_mbuf); 762 cur_tx->ste_mbuf = NULL; 763 } 764 765 IFNET_STAT_INC(ifp, opackets, 1); 766 767 sc->ste_cdata.ste_tx_cnt--; 768 STE_INC(idx, STE_TX_LIST_CNT); 769 ifp->if_timer = 0; 770 } 771 772 sc->ste_cdata.ste_tx_cons = idx; 773 774 if (cur_tx != NULL) 775 ifq_clr_oactive(&ifp->if_snd); 776 777 return; 778 } 779 780 static void 781 ste_stats_update(void *xsc) 782 { 783 struct ste_softc *sc; 784 struct ifnet *ifp; 785 struct mii_data *mii; 786 787 sc = xsc; 788 ifp = &sc->arpcom.ac_if; 789 mii = device_get_softc(sc->ste_miibus); 790 791 lwkt_serialize_enter(ifp->if_serializer); 792 793 IFNET_STAT_INC(ifp, collisions, CSR_READ_1(sc, STE_LATE_COLLS) 794 + CSR_READ_1(sc, STE_MULTI_COLLS) 795 + CSR_READ_1(sc, STE_SINGLE_COLLS)); 796 797 if (!sc->ste_link) { 798 mii_pollstat(mii); 799 if (mii->mii_media_status & IFM_ACTIVE && 800 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 801 sc->ste_link++; 802 /* 803 * we don't get a call-back on re-init so do it 804 * otherwise we get stuck in the wrong link state 805 */ 806 ste_miibus_statchg(sc->ste_dev); 807 if (!ifq_is_empty(&ifp->if_snd)) 808 if_devstart(ifp); 809 } 810 } 811 812 callout_reset(&sc->ste_stat_timer, hz, ste_stats_update, sc); 813 lwkt_serialize_exit(ifp->if_serializer); 814 } 815 816 817 /* 818 * Probe for a Sundance ST201 chip. Check the PCI vendor and device 819 * IDs against our list and return a device name if we find a match. 
 */
static int
ste_probe(device_t dev)
{
	struct ste_type *t;

	t = ste_devs;

	/* Walk the NULL-terminated ste_devs table. */
	while(t->ste_name != NULL) {
		if ((pci_get_vendor(dev) == t->ste_vid) &&
		    (pci_get_device(dev) == t->ste_did)) {
			device_set_desc(dev, t->ste_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 * On any failure this jumps to `fail`, which calls ste_detach() to
 * release whatever resources were already acquired.
 */
static int
ste_attach(device_t dev)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	int error = 0, rid;
	uint8_t eaddr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->ste_dev = dev;

	/*
	 * Only use one PHY since this chip reports multiple
	 * Note on the DFE-550 the PHY is at 1 on the DFE-580
	 * it is at 0 & 1. It is rev 0x12.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_DLINK &&
	    pci_get_device(dev) == PCI_PRODUCT_DLINK_DL1002 &&
	    pci_get_revid(dev) == 0x12 )
		sc->ste_one_phy = 1;

	/*
	 * Handle power management nonsense: moving to D0 can clear
	 * BARs and the interrupt line, so save and restore them.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, STE_PCI_LOIO, 4);
		membase = pci_read_config(dev, STE_PCI_LOMEM, 4);
		irq = pci_read_config(dev, STE_PCI_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, STE_PCI_LOIO, iobase, 4);
		pci_write_config(dev, STE_PCI_LOMEM, membase, 4);
		pci_write_config(dev, STE_PCI_INTLINE, irq, 4);
	}

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = STE_RID;
	sc->ste_res = bus_alloc_resource_any(dev, STE_RES, &rid, RF_ACTIVE);

	if (sc->ste_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->ste_btag = rman_get_bustag(sc->ste_res);
	sc->ste_bhandle = rman_get_bushandle(sc->ste_res);

	rid = 0;
	sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->ste_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	callout_init(&sc->ste_stat_timer);

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* Reset the adapter. */
	ste_reset(sc);

	/*
	 * Get station address from the EEPROM (3 words, stored in
	 * network byte order so no swap).
	 */
	if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, 3, 0)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate the descriptor queues (physically contiguous,
	 * below 4GB, since the chip DMAs via 32-bit vtophys addrs). */
	sc->ste_ldata = contigmalloc(sizeof(struct ste_list_data), M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->ste_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->ste_miibus,
	    ste_ifmedia_upd, ste_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_init = ste_init;
	ifp->if_baudrate = 10000000;
	ifq_set_maxlen(&ifp->if_snd, STE_TX_LIST_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	sc->ste_tx_thresh = STE_TXSTART_THRESH;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->ste_irq));

	/* Hook the interrupt last, protected by the if serializer. */
	error = bus_setup_intr(dev, sc->ste_irq, INTR_MPSAFE,
	    ste_intr, sc, &sc->ste_intrhand,
	    ifp->if_serializer);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;

fail:
	ste_detach(dev);
	return(error);
}

/*
 * Detach the interface; also used as the error-unwind path from
 * ste_attach(), so every release is guarded by a NULL check.
 */
static int
ste_detach(device_t dev)
{
	struct ste_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		ste_stop(sc);
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}
	if (sc->ste_miibus != NULL)
		device_delete_child(dev, sc->ste_miibus);
	bus_generic_detach(dev);

	if (sc->ste_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
	if (sc->ste_res != NULL)
		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);
	if (sc->ste_ldata != NULL) {
		contigfree(sc->ste_ldata, sizeof(struct ste_list_data),
		    M_DEVBUF);
	}

	return(0);
}

/*
 * Attach a cluster mbuf to an RX descriptor.  If `m` is NULL a fresh
 * header+cluster is allocated; otherwise the given mbuf is recycled.
 * Returns ENOBUFS if allocation fails, 0 on success.
 */
static int
ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c,
    struct mbuf *m)
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);
		MCLGET(m_new, MB_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Keep the IP header longword-aligned within the cluster. */
	m_adj(m_new, ETHER_ALIGN);

	c->ste_mbuf = m_new;
	c->ste_ptr->ste_status = 0;
	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t));
	c->ste_ptr->ste_frag.ste_len = (1536 + EVL_ENCAPLEN) | STE_FRAG_LAST;

	return(0);
}

/*
 * Build the RX descriptor ring: allocate a cluster per slot and link
 * both the software chain and the hardware next-pointers (vtophys)
 * into a circle.  Returns ENOBUFS if any buffer allocation fails.
 */
static int
ste_init_rx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (STE_RX_LIST_CNT - 1)) {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[0];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[0]);
		} else {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[i + 1];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[i + 1]);
		}
		ld->ste_rx_list[i].ste_status = 0;
	}

	cd->ste_rx_head = &cd->ste_rx_chain[0];

	return(0);
}

/*
 * Build the TX descriptor ring: circular doubly-linked software
 * chain; hardware next-pointers start at 0 and are stitched in as
 * frames are queued.
 */
static void
ste_init_tx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
		cd->ste_tx_chain[i].ste_ptr->ste_next = 0;
		cd->ste_tx_chain[i].ste_ptr->ste_ctl = 0;
		cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]);
		if (i == (STE_TX_LIST_CNT - 1))
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[0];
		else
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[i + 1];
		if (i == 0)
			cd->ste_tx_chain[i].ste_prev =
			    &cd->ste_tx_chain[STE_TX_LIST_CNT - 1];
		else
			cd->ste_tx_chain[i].ste_prev =
			    &cd->ste_tx_chain[i - 1];
	}

	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;
cd->ste_tx_cnt = 0;

	return;
}

/*
 * Bring the chip up: stop any activity, program the MAC address and
 * RX filter, load the descriptor rings, and enable RX/TX, stats and
 * interrupts.  Serves as ifp->if_init.
 */
static void
ste_init(void *xsc)
{
	struct ste_softc *sc;
	int i;
	struct ifnet *ifp;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	ste_stop(sc);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) == ENOBUFS) {
		if_printf(ifp, "initialization failed: no "
		    "memory for RX buffers\n");
		ste_stop(sc);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 1);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Set up the RX filter. */
	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	}

	/* Set capture broadcast bit to accept broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	}

	/* Program the multicast hash filter. */
	ste_setmulti(sc);

	/* Load the address of the RX list: stall RX DMA first, wait
	 * for the stall to take, then write the list base and unstall.
	 */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    vtophys(&sc->ste_ldata->ste_rx_list[0]));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval (defer until we TX first packet */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/* Load address of the TX list (0: stitched in when the first
	 * frame is queued by ste_start/ste_encap). */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);
	sc->ste_tx_prev_idx=-1;

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);

	/* Enable interrupts: ack anything pending, then unmask. */
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + EVL_ENCAPLEN);

	ste_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Start the 1 Hz stats/link poll. */
	callout_reset(&sc->ste_stat_timer, hz, ste_stats_update, sc);
}

/*
 * Stop the chip: mask interrupts, disable RX/TX/stats, stall DMA,
 * then free every mbuf still attached to the RX and TX rings.
 * Caller must hold ifp->if_serializer.
 */
static void
ste_stop(struct ste_softc *sc)
{
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	callout_stop(&sc->ste_stat_timer);

	CSR_WRITE_2(sc, STE_IMR, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
	/*
	 * NOTE(review): STE_DMACTL is accessed with 4-byte ops
	 * everywhere else (STE_SETBIT4/CSR_READ_4); these two use
	 * STE_SETBIT2 -- confirm against the ST201 register layout
	 * that the stall bits live in the low 16 bits.
	 */
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	/*
	 * Try really hard to stop the RX engine or under heavy RX
	 * data chip will write into de-allocated memory.
1243 */ 1244 ste_reset(sc); 1245 1246 sc->ste_link = 0; 1247 1248 for (i = 0; i < STE_RX_LIST_CNT; i++) { 1249 if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) { 1250 m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf); 1251 sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL; 1252 } 1253 } 1254 1255 for (i = 0; i < STE_TX_LIST_CNT; i++) { 1256 if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) { 1257 m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf); 1258 sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL; 1259 } 1260 } 1261 1262 bzero(sc->ste_ldata, sizeof(struct ste_list_data)); 1263 1264 ifp->if_flags &= ~IFF_RUNNING; 1265 ifq_clr_oactive(&ifp->if_snd); 1266 1267 return; 1268 } 1269 1270 static void 1271 ste_reset(struct ste_softc *sc) 1272 { 1273 int i; 1274 1275 STE_SETBIT4(sc, STE_ASICCTL, 1276 STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET| 1277 STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET| 1278 STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET| 1279 STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET| 1280 STE_ASICCTL_EXTRESET_RESET); 1281 1282 DELAY(100000); 1283 1284 for (i = 0; i < STE_TIMEOUT; i++) { 1285 if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY)) 1286 break; 1287 } 1288 1289 if (i == STE_TIMEOUT) 1290 if_printf(&sc->arpcom.ac_if, "global reset never completed\n"); 1291 1292 return; 1293 } 1294 1295 static int 1296 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1297 { 1298 struct ste_softc *sc; 1299 struct ifreq *ifr; 1300 struct mii_data *mii; 1301 int error = 0; 1302 1303 sc = ifp->if_softc; 1304 ifr = (struct ifreq *)data; 1305 1306 switch(command) { 1307 case SIOCSIFFLAGS: 1308 if (ifp->if_flags & IFF_UP) { 1309 if (ifp->if_flags & IFF_RUNNING && 1310 ifp->if_flags & IFF_PROMISC && 1311 !(sc->ste_if_flags & IFF_PROMISC)) { 1312 STE_SETBIT1(sc, STE_RX_MODE, 1313 STE_RXMODE_PROMISC); 1314 } else if (ifp->if_flags & IFF_RUNNING && 1315 !(ifp->if_flags & IFF_PROMISC) && 1316 sc->ste_if_flags & IFF_PROMISC) { 1317 
STE_CLRBIT1(sc, STE_RX_MODE, 1318 STE_RXMODE_PROMISC); 1319 } 1320 if (!(ifp->if_flags & IFF_RUNNING)) { 1321 sc->ste_tx_thresh = STE_TXSTART_THRESH; 1322 ste_init(sc); 1323 } 1324 } else { 1325 if (ifp->if_flags & IFF_RUNNING) 1326 ste_stop(sc); 1327 } 1328 sc->ste_if_flags = ifp->if_flags; 1329 error = 0; 1330 break; 1331 case SIOCADDMULTI: 1332 case SIOCDELMULTI: 1333 ste_setmulti(sc); 1334 error = 0; 1335 break; 1336 case SIOCGIFMEDIA: 1337 case SIOCSIFMEDIA: 1338 mii = device_get_softc(sc->ste_miibus); 1339 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1340 break; 1341 default: 1342 error = ether_ioctl(ifp, command, data); 1343 break; 1344 } 1345 return(error); 1346 } 1347 1348 static int 1349 ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head) 1350 { 1351 int frag = 0; 1352 struct ste_frag *f = NULL; 1353 struct mbuf *m; 1354 struct ste_desc *d; 1355 int total_len = 0; 1356 1357 d = c->ste_ptr; 1358 d->ste_ctl = 0; 1359 1360 encap_retry: 1361 for (m = m_head, frag = 0; m != NULL; m = m->m_next) { 1362 if (m->m_len != 0) { 1363 if (frag == STE_MAXFRAGS) 1364 break; 1365 total_len += m->m_len; 1366 f = &d->ste_frags[frag]; 1367 f->ste_addr = vtophys(mtod(m, vm_offset_t)); 1368 f->ste_len = m->m_len; 1369 frag++; 1370 } 1371 } 1372 1373 if (m != NULL) { 1374 struct mbuf *mn; 1375 1376 /* 1377 * We ran out of segments. We have to recopy this 1378 * mbuf chain first. Bail out if we can't get the 1379 * new buffers. Code borrowed from if_fxp.c. 
1380 */ 1381 MGETHDR(mn, MB_DONTWAIT, MT_DATA); 1382 if (mn == NULL) { 1383 m_freem(m_head); 1384 return ENOMEM; 1385 } 1386 if (m_head->m_pkthdr.len > MHLEN) { 1387 MCLGET(mn, MB_DONTWAIT); 1388 if ((mn->m_flags & M_EXT) == 0) { 1389 m_freem(mn); 1390 m_freem(m_head); 1391 return ENOMEM; 1392 } 1393 } 1394 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1395 mtod(mn, caddr_t)); 1396 mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len; 1397 m_freem(m_head); 1398 m_head = mn; 1399 goto encap_retry; 1400 } 1401 1402 c->ste_mbuf = m_head; 1403 d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST; 1404 d->ste_ctl = 1; 1405 1406 return(0); 1407 } 1408 1409 static void 1410 ste_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1411 { 1412 struct ste_softc *sc; 1413 struct mbuf *m_head = NULL; 1414 struct ste_chain *cur_tx = NULL; 1415 int idx; 1416 1417 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1418 1419 sc = ifp->if_softc; 1420 1421 if (!sc->ste_link) { 1422 ifq_purge(&ifp->if_snd); 1423 return; 1424 } 1425 1426 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 1427 return; 1428 1429 idx = sc->ste_cdata.ste_tx_prod; 1430 1431 while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) { 1432 1433 if ((STE_TX_LIST_CNT - sc->ste_cdata.ste_tx_cnt) < 3) { 1434 ifq_set_oactive(&ifp->if_snd); 1435 break; 1436 } 1437 1438 m_head = ifq_dequeue(&ifp->if_snd); 1439 if (m_head == NULL) 1440 break; 1441 1442 cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; 1443 1444 if (ste_encap(sc, cur_tx, m_head) != 0) 1445 break; 1446 1447 cur_tx->ste_ptr->ste_next = 0; 1448 1449 if(sc->ste_tx_prev_idx < 0){ 1450 cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1; 1451 /* Load address of the TX list */ 1452 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); 1453 ste_wait(sc); 1454 1455 CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 1456 vtophys(&sc->ste_ldata->ste_tx_list[0])); 1457 1458 /* Set TX polling interval to start TX engine */ 1459 CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64); 1460 1461 
STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); 1462 ste_wait(sc); 1463 }else{ 1464 cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1; 1465 sc->ste_cdata.ste_tx_chain[ 1466 sc->ste_tx_prev_idx].ste_ptr->ste_next 1467 = cur_tx->ste_phys; 1468 } 1469 1470 sc->ste_tx_prev_idx=idx; 1471 1472 BPF_MTAP(ifp, cur_tx->ste_mbuf); 1473 1474 STE_INC(idx, STE_TX_LIST_CNT); 1475 sc->ste_cdata.ste_tx_cnt++; 1476 ifp->if_timer = 5; 1477 sc->ste_cdata.ste_tx_prod = idx; 1478 } 1479 } 1480 1481 static void 1482 ste_watchdog(struct ifnet *ifp) 1483 { 1484 struct ste_softc *sc; 1485 1486 sc = ifp->if_softc; 1487 1488 IFNET_STAT_INC(ifp, oerrors, 1); 1489 if_printf(ifp, "watchdog timeout\n"); 1490 1491 ste_txeoc(sc); 1492 ste_txeof(sc); 1493 ste_rxeof(sc); 1494 ste_reset(sc); 1495 ste_init(sc); 1496 1497 if (!ifq_is_empty(&ifp->if_snd)) 1498 if_devstart(ifp); 1499 } 1500 1501 static void 1502 ste_shutdown(device_t dev) 1503 { 1504 struct ste_softc *sc; 1505 1506 sc = device_get_softc(dev); 1507 1508 ste_stop(sc); 1509 1510 return; 1511 } 1512