1 /* 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 * 32 * $FreeBSD: src/sys/pci/if_sf.c,v 1.18.2.8 2001/12/16 15:46:07 luigi Exp $ 33 * $DragonFly: src/sys/dev/netif/sf/if_sf.c,v 1.30 2006/12/22 23:26:22 swildner Exp $ 34 */ 35 36 /* 37 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD. 38 * Programming manual is available from: 39 * ftp.adaptec.com:/pub/BBS/userguides/aic6915_pg.pdf. 40 * 41 * Written by Bill Paul <wpaul@ctr.columbia.edu> 42 * Department of Electical Engineering 43 * Columbia University, New York City 44 */ 45 46 /* 47 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet 48 * controller designed with flexibility and reducing CPU load in mind. 49 * The Starfire offers high and low priority buffer queues, a 50 * producer/consumer index mechanism and several different buffer 51 * queue and completion queue descriptor types. Any one of a number 52 * of different driver designs can be used, depending on system and 53 * OS requirements. This driver makes use of type0 transmit frame 54 * descriptors (since BSD fragments packets across an mbuf chain) 55 * and two RX buffer queues prioritized on size (one queue for small 56 * frames that will fit into a single mbuf, another with full size 57 * mbuf clusters for everything else). The producer/consumer indexes 58 * and completion queues are also used. 59 * 60 * One downside to the Starfire has to do with alignment: buffer 61 * queues must be aligned on 256-byte boundaries, and receive buffers 62 * must be aligned on longword boundaries. The receive buffer alignment 63 * causes problems on the Alpha platform, where the packet payload 64 * should be longword aligned. There is no simple way around this. 65 * 66 * For receive filtering, the Starfire offers 16 perfect filter slots 67 * and a 512-bit hash table. 68 * 69 * The Starfire has no internal transceiver, relying instead on an 70 * external MII-based transceiver. Accessing registers on external 71 * PHYs is done through a special register map rather than with the 72 * usual bitbang MDIO method. 73 * 74 * Acesssing the registers on the Starfire is a little tricky. The 75 * Starfire has a 512K internal register space. When programmed for 76 * PCI memory mapped mode, the entire register space can be accessed 77 * directly. However in I/O space mode, only 256 bytes are directly 78 * mapped into PCI I/O space. The other registers can be accessed 79 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA 80 * registers inside the 256-byte I/O window. 81 */ 82 83 #include <sys/param.h> 84 #include <sys/systm.h> 85 #include <sys/sockio.h> 86 #include <sys/mbuf.h> 87 #include <sys/malloc.h> 88 #include <sys/kernel.h> 89 #include <sys/socket.h> 90 #include <sys/serialize.h> 91 #include <sys/bus.h> 92 #include <sys/rman.h> 93 #include <sys/thread2.h> 94 95 #include <net/if.h> 96 #include <net/ifq_var.h> 97 #include <net/if_arp.h> 98 #include <net/ethernet.h> 99 #include <net/if_dl.h> 100 #include <net/if_media.h> 101 102 #include <net/bpf.h> 103 104 #include <vm/vm.h> /* for vtophys */ 105 #include <vm/pmap.h> /* for vtophys */ 106 107 #include <machine/clock.h> /* for DELAY */ 108 109 #include "../mii_layer/mii.h" 110 #include "../mii_layer/miivar.h" 111 112 /* "controller miibus0" required. See GENERIC if you get errors here. */ 113 #include "miibus_if.h" 114 115 #include <bus/pci/pcidevs.h> 116 #include <bus/pci/pcireg.h> 117 #include <bus/pci/pcivar.h> 118 119 #define SF_USEIOSPACE 120 121 #include "if_sfreg.h" 122 123 static struct sf_type sf_devs[] = { 124 { PCI_VENDOR_ADP, PCI_PRODUCT_ADP_AIC6915, 125 "Adaptec AIC-6915 10/100BaseTX" }, 126 { 0, 0, NULL } 127 }; 128 129 static int sf_probe (device_t); 130 static int sf_attach (device_t); 131 static int sf_detach (device_t); 132 static void sf_intr (void *); 133 static void sf_stats_update (void *); 134 static void sf_rxeof (struct sf_softc *); 135 static void sf_txeof (struct sf_softc *); 136 static int sf_encap (struct sf_softc *, 137 struct sf_tx_bufdesc_type0 *, 138 struct mbuf *); 139 static void sf_start (struct ifnet *); 140 static int sf_ioctl (struct ifnet *, u_long, caddr_t, 141 struct ucred *); 142 static void sf_init (void *); 143 static void sf_stop (struct sf_softc *); 144 static void sf_watchdog (struct ifnet *); 145 static void sf_shutdown (device_t); 146 static int sf_ifmedia_upd (struct ifnet *); 147 static void sf_ifmedia_sts (struct ifnet *, struct ifmediareq *); 148 static void sf_reset (struct sf_softc *); 149 static int sf_init_rx_ring (struct sf_softc *); 150 static void sf_init_tx_ring (struct sf_softc *); 151 static int sf_newbuf (struct sf_softc *, 152 struct sf_rx_bufdesc_type0 *, 153 struct mbuf *); 154 static void sf_setmulti (struct sf_softc *); 155 static int sf_setperf (struct sf_softc *, int, caddr_t); 156 static int sf_sethash (struct sf_softc *, caddr_t, int); 157 #ifdef notdef 158 static int sf_setvlan (struct sf_softc *, int, u_int32_t); 159 #endif 160 161 static u_int8_t sf_read_eeprom (struct sf_softc *, int); 162 static u_int32_t sf_calchash (caddr_t); 163 164 static int sf_miibus_readreg (device_t, int, int); 165 static int sf_miibus_writereg (device_t, int, int, int); 166 static void sf_miibus_statchg (device_t); 167 168 static u_int32_t csr_read_4 (struct sf_softc *, int); 169 static void csr_write_4 (struct sf_softc *, int, u_int32_t); 170 static void sf_txthresh_adjust (struct sf_softc *); 171 172 #ifdef SF_USEIOSPACE 173 #define SF_RES SYS_RES_IOPORT 174 #define SF_RID SF_PCI_LOIO 175 #else 176 #define SF_RES SYS_RES_MEMORY 177 #define SF_RID SF_PCI_LOMEM 178 #endif 179 180 static device_method_t sf_methods[] = { 181 /* Device interface */ 182 DEVMETHOD(device_probe, sf_probe), 183 DEVMETHOD(device_attach, sf_attach), 184 DEVMETHOD(device_detach, sf_detach), 185 DEVMETHOD(device_shutdown, sf_shutdown), 186 187 /* bus interface */ 188 DEVMETHOD(bus_print_child, bus_generic_print_child), 189 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 190 191 /* MII interface */ 192 DEVMETHOD(miibus_readreg, sf_miibus_readreg), 193 DEVMETHOD(miibus_writereg, sf_miibus_writereg), 194 DEVMETHOD(miibus_statchg, sf_miibus_statchg), 195 196 { 0, 0 } 197 }; 198 199 static driver_t sf_driver = { 200 "sf", 201 sf_methods, 202 sizeof(struct sf_softc), 203 }; 204 205 static devclass_t sf_devclass; 206 207 DECLARE_DUMMY_MODULE(if_sf); 208 DRIVER_MODULE(if_sf, pci, sf_driver, sf_devclass, 0, 0); 209 DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); 210 211 #define SF_SETBIT(sc, reg, x) \ 212 csr_write_4(sc, reg, csr_read_4(sc, reg) | x) 213 214 #define SF_CLRBIT(sc, reg, x) \ 215 csr_write_4(sc, reg, csr_read_4(sc, reg) & ~x) 216 217 static u_int32_t 218 csr_read_4(struct sf_softc *sc, int reg) 219 { 220 u_int32_t val; 221 222 #ifdef SF_USEIOSPACE 223 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 224 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); 225 #else 226 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); 227 #endif 228 229 return(val); 230 } 231 232 static u_int8_t 233 sf_read_eeprom(struct sf_softc *sc, int reg) 234 { 235 u_int8_t val; 236 237 val = (csr_read_4(sc, SF_EEADDR_BASE + 238 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; 239 240 return(val); 241 } 242 243 static void 244 csr_write_4(struct sf_softc *sc, int reg, u_int32_t val) 245 { 246 #ifdef SF_USEIOSPACE 247 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 248 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); 249 #else 250 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); 251 #endif 252 return; 253 } 254 255 static u_int32_t 256 sf_calchash(caddr_t addr) 257 { 258 u_int32_t crc, carry; 259 int i, j; 260 u_int8_t c; 261 262 /* Compute CRC for the address value. */ 263 crc = 0xFFFFFFFF; /* initial value */ 264 265 for (i = 0; i < 6; i++) { 266 c = *(addr + i); 267 for (j = 0; j < 8; j++) { 268 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 269 crc <<= 1; 270 c >>= 1; 271 if (carry) 272 crc = (crc ^ 0x04c11db6) | carry; 273 } 274 } 275 276 /* return the filter bit position */ 277 return(crc >> 23 & 0x1FF); 278 } 279 280 /* 281 * Copy the address 'mac' into the perfect RX filter entry at 282 * offset 'idx.' The perfect filter only has 16 entries so do 283 * some sanity tests. 284 */ 285 static int 286 sf_setperf(struct sf_softc *sc, int idx, caddr_t mac) 287 { 288 u_int16_t *p; 289 290 if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT) 291 return(EINVAL); 292 293 if (mac == NULL) 294 return(EINVAL); 295 296 p = (u_int16_t *)mac; 297 298 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 299 (idx * SF_RXFILT_PERFECT_SKIP), htons(p[2])); 300 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 301 (idx * SF_RXFILT_PERFECT_SKIP) + 4, htons(p[1])); 302 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 303 (idx * SF_RXFILT_PERFECT_SKIP) + 8, htons(p[0])); 304 305 return(0); 306 } 307 308 /* 309 * Set the bit in the 512-bit hash table that corresponds to the 310 * specified mac address 'mac.' If 'prio' is nonzero, update the 311 * priority hash table instead of the filter hash table. 312 */ 313 static int 314 sf_sethash(struct sf_softc *sc, caddr_t mac, int prio) 315 { 316 u_int32_t h = 0; 317 318 if (mac == NULL) 319 return(EINVAL); 320 321 h = sf_calchash(mac); 322 323 if (prio) { 324 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + 325 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 326 } else { 327 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + 328 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 329 } 330 331 return(0); 332 } 333 334 #ifdef notdef 335 /* 336 * Set a VLAN tag in the receive filter. 337 */ 338 static int 339 sf_setvlan(struct sf_softc *sc, int idx, u_int32_t vlan) 340 { 341 if (idx < 0 || idx >> SF_RXFILT_HASH_CNT) 342 return(EINVAL); 343 344 csr_write_4(sc, SF_RXFILT_HASH_BASE + 345 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); 346 347 return(0); 348 } 349 #endif 350 351 static int 352 sf_miibus_readreg(device_t dev, int phy, int reg) 353 { 354 struct sf_softc *sc; 355 int i; 356 u_int32_t val = 0; 357 358 sc = device_get_softc(dev); 359 360 for (i = 0; i < SF_TIMEOUT; i++) { 361 val = csr_read_4(sc, SF_PHY_REG(phy, reg)); 362 if (val & SF_MII_DATAVALID) 363 break; 364 } 365 366 if (i == SF_TIMEOUT) 367 return(0); 368 369 if ((val & 0x0000FFFF) == 0xFFFF) 370 return(0); 371 372 return(val & 0x0000FFFF); 373 } 374 375 static int 376 sf_miibus_writereg(device_t dev, int phy, int reg, int val) 377 { 378 struct sf_softc *sc; 379 int i; 380 int busy; 381 382 sc = device_get_softc(dev); 383 384 csr_write_4(sc, SF_PHY_REG(phy, reg), val); 385 386 for (i = 0; i < SF_TIMEOUT; i++) { 387 busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); 388 if (!(busy & SF_MII_BUSY)) 389 break; 390 } 391 392 return(0); 393 } 394 395 static void 396 sf_miibus_statchg(device_t dev) 397 { 398 struct sf_softc *sc; 399 struct mii_data *mii; 400 401 sc = device_get_softc(dev); 402 mii = device_get_softc(sc->sf_miibus); 403 404 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 405 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX); 406 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); 407 } else { 408 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX); 409 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); 410 } 411 412 return; 413 } 414 415 static void 416 sf_setmulti(struct sf_softc *sc) 417 { 418 struct ifnet *ifp; 419 int i; 420 struct ifmultiaddr *ifma; 421 u_int8_t dummy[] = { 0, 0, 0, 0, 0, 0 }; 422 423 ifp = &sc->arpcom.ac_if; 424 425 /* First zot all the existing filters. */ 426 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) 427 sf_setperf(sc, i, (char *)&dummy); 428 for (i = SF_RXFILT_HASH_BASE; 429 i < (SF_RXFILT_HASH_MAX + 1); i += 4) 430 csr_write_4(sc, i, 0); 431 SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI); 432 433 /* Now program new ones. */ 434 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 435 SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI); 436 } else { 437 i = 1; 438 /* First find the tail of the list. */ 439 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; 440 ifma = ifma->ifma_link.le_next) { 441 if (ifma->ifma_link.le_next == NULL) 442 break; 443 } 444 /* Now traverse the list backwards. */ 445 for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs; 446 ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) { 447 if (ifma->ifma_addr->sa_family != AF_LINK) 448 continue; 449 /* 450 * Program the first 15 multicast groups 451 * into the perfect filter. For all others, 452 * use the hash table. 453 */ 454 if (i < SF_RXFILT_PERFECT_CNT) { 455 sf_setperf(sc, i, 456 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 457 i++; 458 continue; 459 } 460 461 sf_sethash(sc, 462 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); 463 } 464 } 465 466 return; 467 } 468 469 /* 470 * Set media options. 471 */ 472 static int 473 sf_ifmedia_upd(struct ifnet *ifp) 474 { 475 struct sf_softc *sc; 476 struct mii_data *mii; 477 478 sc = ifp->if_softc; 479 mii = device_get_softc(sc->sf_miibus); 480 sc->sf_link = 0; 481 if (mii->mii_instance) { 482 struct mii_softc *miisc; 483 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 484 miisc = LIST_NEXT(miisc, mii_list)) 485 mii_phy_reset(miisc); 486 } 487 mii_mediachg(mii); 488 489 return(0); 490 } 491 492 /* 493 * Report current media status. 494 */ 495 static void 496 sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 497 { 498 struct sf_softc *sc; 499 struct mii_data *mii; 500 501 sc = ifp->if_softc; 502 mii = device_get_softc(sc->sf_miibus); 503 504 mii_pollstat(mii); 505 ifmr->ifm_active = mii->mii_media_active; 506 ifmr->ifm_status = mii->mii_media_status; 507 508 return; 509 } 510 511 static int 512 sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 513 { 514 struct sf_softc *sc = ifp->if_softc; 515 struct ifreq *ifr = (struct ifreq *) data; 516 struct mii_data *mii; 517 int error = 0; 518 519 switch(command) { 520 case SIOCSIFFLAGS: 521 if (ifp->if_flags & IFF_UP) { 522 if (ifp->if_flags & IFF_RUNNING && 523 ifp->if_flags & IFF_PROMISC && 524 !(sc->sf_if_flags & IFF_PROMISC)) { 525 SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); 526 } else if (ifp->if_flags & IFF_RUNNING && 527 !(ifp->if_flags & IFF_PROMISC) && 528 sc->sf_if_flags & IFF_PROMISC) { 529 SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); 530 } else if (!(ifp->if_flags & IFF_RUNNING)) 531 sf_init(sc); 532 } else { 533 if (ifp->if_flags & IFF_RUNNING) 534 sf_stop(sc); 535 } 536 sc->sf_if_flags = ifp->if_flags; 537 error = 0; 538 break; 539 case SIOCADDMULTI: 540 case SIOCDELMULTI: 541 sf_setmulti(sc); 542 error = 0; 543 break; 544 case SIOCGIFMEDIA: 545 case SIOCSIFMEDIA: 546 mii = device_get_softc(sc->sf_miibus); 547 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 548 break; 549 default: 550 error = ether_ioctl(ifp, command, data); 551 break; 552 } 553 554 return(error); 555 } 556 557 static void 558 sf_reset(struct sf_softc *sc) 559 { 560 int i; 561 562 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 563 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 564 DELAY(1000); 565 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 566 567 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); 568 569 for (i = 0; i < SF_TIMEOUT; i++) { 570 DELAY(10); 571 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) 572 break; 573 } 574 575 if (i == SF_TIMEOUT) 576 kprintf("sf%d: reset never completed!\n", sc->sf_unit); 577 578 /* Wait a little while for the chip to get its brains in order. */ 579 DELAY(1000); 580 return; 581 } 582 583 /* 584 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device 585 * IDs against our list and return a device name if we find a match. 586 * We also check the subsystem ID so that we can identify exactly which 587 * NIC has been found, if possible. 588 */ 589 static int 590 sf_probe(device_t dev) 591 { 592 struct sf_type *t; 593 594 t = sf_devs; 595 596 while(t->sf_name != NULL) { 597 if ((pci_get_vendor(dev) == t->sf_vid) && 598 (pci_get_device(dev) == t->sf_did)) { 599 switch((pci_read_config(dev, 600 SF_PCI_SUBVEN_ID, 4) >> 16) & 0xFFFF) { 601 case AD_SUBSYSID_62011_REV0: 602 case AD_SUBSYSID_62011_REV1: 603 device_set_desc(dev, 604 "Adaptec ANA-62011 10/100BaseTX"); 605 return(0); 606 break; 607 case AD_SUBSYSID_62022: 608 device_set_desc(dev, 609 "Adaptec ANA-62022 10/100BaseTX"); 610 return(0); 611 break; 612 case AD_SUBSYSID_62044_REV0: 613 case AD_SUBSYSID_62044_REV1: 614 device_set_desc(dev, 615 "Adaptec ANA-62044 10/100BaseTX"); 616 return(0); 617 break; 618 case AD_SUBSYSID_62020: 619 device_set_desc(dev, 620 "Adaptec ANA-62020 10/100BaseFX"); 621 return(0); 622 break; 623 case AD_SUBSYSID_69011: 624 device_set_desc(dev, 625 "Adaptec ANA-69011 10/100BaseTX"); 626 return(0); 627 break; 628 default: 629 device_set_desc(dev, t->sf_name); 630 return(0); 631 break; 632 } 633 } 634 t++; 635 } 636 637 return(ENXIO); 638 } 639 640 /* 641 * Attach the interface. Allocate softc structures, do ifmedia 642 * setup and ethernet/BPF attach. 643 */ 644 static int 645 sf_attach(device_t dev) 646 { 647 int i; 648 u_int32_t command; 649 struct sf_softc *sc; 650 struct ifnet *ifp; 651 int unit, rid, error = 0; 652 653 sc = device_get_softc(dev); 654 unit = device_get_unit(dev); 655 656 /* 657 * Handle power management nonsense. 658 */ 659 command = pci_read_config(dev, SF_PCI_CAPID, 4) & 0x000000FF; 660 if (command == 0x01) { 661 662 command = pci_read_config(dev, SF_PCI_PWRMGMTCTRL, 4); 663 if (command & SF_PSTATE_MASK) { 664 u_int32_t iobase, membase, irq; 665 666 /* Save important PCI config data. */ 667 iobase = pci_read_config(dev, SF_PCI_LOIO, 4); 668 membase = pci_read_config(dev, SF_PCI_LOMEM, 4); 669 irq = pci_read_config(dev, SF_PCI_INTLINE, 4); 670 671 /* Reset the power state. */ 672 kprintf("sf%d: chip is in D%d power mode " 673 "-- setting to D0\n", unit, command & SF_PSTATE_MASK); 674 command &= 0xFFFFFFFC; 675 pci_write_config(dev, SF_PCI_PWRMGMTCTRL, command, 4); 676 677 /* Restore PCI config data. */ 678 pci_write_config(dev, SF_PCI_LOIO, iobase, 4); 679 pci_write_config(dev, SF_PCI_LOMEM, membase, 4); 680 pci_write_config(dev, SF_PCI_INTLINE, irq, 4); 681 } 682 } 683 684 /* 685 * Map control/status registers. 686 */ 687 command = pci_read_config(dev, PCIR_COMMAND, 4); 688 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 689 pci_write_config(dev, PCIR_COMMAND, command, 4); 690 command = pci_read_config(dev, PCIR_COMMAND, 4); 691 692 #ifdef SF_USEIOSPACE 693 if (!(command & PCIM_CMD_PORTEN)) { 694 kprintf("sf%d: failed to enable I/O ports!\n", unit); 695 error = ENXIO; 696 return(error); 697 } 698 #else 699 if (!(command & PCIM_CMD_MEMEN)) { 700 kprintf("sf%d: failed to enable memory mapping!\n", unit); 701 error = ENXIO; 702 return(error); 703 } 704 #endif 705 706 rid = SF_RID; 707 sc->sf_res = bus_alloc_resource_any(dev, SF_RES, &rid, RF_ACTIVE); 708 709 if (sc->sf_res == NULL) { 710 kprintf ("sf%d: couldn't map ports\n", unit); 711 error = ENXIO; 712 return(error); 713 } 714 715 sc->sf_btag = rman_get_bustag(sc->sf_res); 716 sc->sf_bhandle = rman_get_bushandle(sc->sf_res); 717 718 /* Allocate interrupt */ 719 rid = 0; 720 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 721 RF_SHAREABLE | RF_ACTIVE); 722 723 if (sc->sf_irq == NULL) { 724 kprintf("sf%d: couldn't map interrupt\n", unit); 725 error = ENXIO; 726 goto fail; 727 } 728 729 callout_init(&sc->sf_stat_timer); 730 731 /* Reset the adapter. */ 732 sf_reset(sc); 733 734 /* 735 * Get station address from the EEPROM. 736 */ 737 for (i = 0; i < ETHER_ADDR_LEN; i++) 738 sc->arpcom.ac_enaddr[i] = 739 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); 740 741 sc->sf_unit = unit; 742 743 /* Allocate the descriptor queues. */ 744 sc->sf_ldata = contigmalloc(sizeof(struct sf_list_data), M_DEVBUF, 745 M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); 746 747 if (sc->sf_ldata == NULL) { 748 kprintf("sf%d: no memory for list buffers!\n", unit); 749 error = ENXIO; 750 goto fail; 751 } 752 753 bzero(sc->sf_ldata, sizeof(struct sf_list_data)); 754 755 /* Do MII setup. */ 756 if (mii_phy_probe(dev, &sc->sf_miibus, 757 sf_ifmedia_upd, sf_ifmedia_sts)) { 758 kprintf("sf%d: MII without any phy!\n", sc->sf_unit); 759 error = ENXIO; 760 goto fail; 761 } 762 763 ifp = &sc->arpcom.ac_if; 764 ifp->if_softc = sc; 765 if_initname(ifp, "sf", unit); 766 ifp->if_mtu = ETHERMTU; 767 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 768 ifp->if_ioctl = sf_ioctl; 769 ifp->if_start = sf_start; 770 ifp->if_watchdog = sf_watchdog; 771 ifp->if_init = sf_init; 772 ifp->if_baudrate = 10000000; 773 ifq_set_maxlen(&ifp->if_snd, SF_TX_DLIST_CNT - 1); 774 ifq_set_ready(&ifp->if_snd); 775 776 /* 777 * Call MI attach routine. 778 */ 779 ether_ifattach(ifp, sc->arpcom.ac_enaddr, NULL); 780 781 error = bus_setup_intr(dev, sc->sf_irq, INTR_NETSAFE, 782 sf_intr, sc, &sc->sf_intrhand, 783 ifp->if_serializer); 784 785 if (error) { 786 ether_ifdetach(ifp); 787 device_printf(dev, "couldn't set up irq\n"); 788 goto fail; 789 } 790 791 return(0); 792 793 fail: 794 sf_detach(dev); 795 return(error); 796 } 797 798 static int 799 sf_detach(device_t dev) 800 { 801 struct sf_softc *sc = device_get_softc(dev); 802 struct ifnet *ifp = &sc->arpcom.ac_if; 803 804 if (device_is_attached(dev)) { 805 lwkt_serialize_enter(ifp->if_serializer); 806 sf_stop(sc); 807 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); 808 lwkt_serialize_exit(ifp->if_serializer); 809 810 ether_ifdetach(ifp); 811 } 812 813 if (sc->sf_miibus) 814 device_delete_child(dev, sc->sf_miibus); 815 bus_generic_detach(dev); 816 817 if (sc->sf_irq) 818 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); 819 if(sc->sf_res) 820 bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); 821 822 if (sc->sf_ldata) { 823 contigfree(sc->sf_ldata, sizeof(struct sf_list_data), 824 M_DEVBUF); 825 } 826 827 return(0); 828 } 829 830 static int 831 sf_init_rx_ring(struct sf_softc *sc) 832 { 833 struct sf_list_data *ld; 834 int i; 835 836 ld = sc->sf_ldata; 837 838 bzero((char *)ld->sf_rx_dlist_big, 839 sizeof(struct sf_rx_bufdesc_type0) * SF_RX_DLIST_CNT); 840 bzero((char *)ld->sf_rx_clist, 841 sizeof(struct sf_rx_cmpdesc_type3) * SF_RX_CLIST_CNT); 842 843 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 844 if (sf_newbuf(sc, &ld->sf_rx_dlist_big[i], NULL) == ENOBUFS) 845 return(ENOBUFS); 846 } 847 848 return(0); 849 } 850 851 static void 852 sf_init_tx_ring(struct sf_softc *sc) 853 { 854 struct sf_list_data *ld; 855 int i; 856 857 ld = sc->sf_ldata; 858 859 bzero((char *)ld->sf_tx_dlist, 860 sizeof(struct sf_tx_bufdesc_type0) * SF_TX_DLIST_CNT); 861 bzero((char *)ld->sf_tx_clist, 862 sizeof(struct sf_tx_cmpdesc_type0) * SF_TX_CLIST_CNT); 863 864 for (i = 0; i < SF_TX_DLIST_CNT; i++) 865 ld->sf_tx_dlist[i].sf_id = SF_TX_BUFDESC_ID; 866 for (i = 0; i < SF_TX_CLIST_CNT; i++) 867 ld->sf_tx_clist[i].sf_type = SF_TXCMPTYPE_TX; 868 869 ld->sf_tx_dlist[SF_TX_DLIST_CNT - 1].sf_end = 1; 870 sc->sf_tx_cnt = 0; 871 872 return; 873 } 874 875 static int 876 sf_newbuf(struct sf_softc *sc, struct sf_rx_bufdesc_type0 *c, 877 struct mbuf *m) 878 { 879 struct mbuf *m_new = NULL; 880 881 if (m == NULL) { 882 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 883 if (m_new == NULL) 884 return(ENOBUFS); 885 886 MCLGET(m_new, MB_DONTWAIT); 887 if (!(m_new->m_flags & M_EXT)) { 888 m_freem(m_new); 889 return(ENOBUFS); 890 } 891 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 892 } else { 893 m_new = m; 894 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 895 m_new->m_data = m_new->m_ext.ext_buf; 896 } 897 898 m_adj(m_new, sizeof(u_int64_t)); 899 900 c->sf_mbuf = m_new; 901 c->sf_addrlo = SF_RX_HOSTADDR(vtophys(mtod(m_new, caddr_t))); 902 c->sf_valid = 1; 903 904 return(0); 905 } 906 907 /* 908 * The starfire is programmed to use 'normal' mode for packet reception, 909 * which means we use the consumer/producer model for both the buffer 910 * descriptor queue and the completion descriptor queue. The only problem 911 * with this is that it involves a lot of register accesses: we have to 912 * read the RX completion consumer and producer indexes and the RX buffer 913 * producer index, plus the RX completion consumer and RX buffer producer 914 * indexes have to be updated. It would have been easier if Adaptec had 915 * put each index in a separate register, especially given that the damn 916 * NIC has a 512K register space. 917 * 918 * In spite of all the lovely features that Adaptec crammed into the 6915, 919 * it is marred by one truly stupid design flaw, which is that receive 920 * buffer addresses must be aligned on a longword boundary. This forces 921 * the packet payload to be unaligned, which is suboptimal on the x86 and 922 * completely unuseable on the Alpha. Our only recourse is to copy received 923 * packets into properly aligned buffers before handing them off. 924 */ 925 926 static void 927 sf_rxeof(struct sf_softc *sc) 928 { 929 struct mbuf *m; 930 struct ifnet *ifp; 931 struct sf_rx_bufdesc_type0 *desc; 932 struct sf_rx_cmpdesc_type3 *cur_rx; 933 u_int32_t rxcons, rxprod; 934 int cmpprodidx, cmpconsidx, bufprodidx; 935 936 ifp = &sc->arpcom.ac_if; 937 938 rxcons = csr_read_4(sc, SF_CQ_CONSIDX); 939 rxprod = csr_read_4(sc, SF_RXDQ_PTR_Q1); 940 cmpprodidx = SF_IDX_LO(csr_read_4(sc, SF_CQ_PRODIDX)); 941 cmpconsidx = SF_IDX_LO(rxcons); 942 bufprodidx = SF_IDX_LO(rxprod); 943 944 while (cmpconsidx != cmpprodidx) { 945 struct mbuf *m0; 946 947 cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx]; 948 desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx]; 949 m = desc->sf_mbuf; 950 SF_INC(cmpconsidx, SF_RX_CLIST_CNT); 951 SF_INC(bufprodidx, SF_RX_DLIST_CNT); 952 953 if (!(cur_rx->sf_status1 & SF_RXSTAT1_OK)) { 954 ifp->if_ierrors++; 955 sf_newbuf(sc, desc, m); 956 continue; 957 } 958 959 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 960 cur_rx->sf_len + ETHER_ALIGN, 0, ifp, NULL); 961 sf_newbuf(sc, desc, m); 962 if (m0 == NULL) { 963 ifp->if_ierrors++; 964 continue; 965 } 966 m_adj(m0, ETHER_ALIGN); 967 m = m0; 968 969 ifp->if_ipackets++; 970 971 ifp->if_input(ifp, m); 972 } 973 974 csr_write_4(sc, SF_CQ_CONSIDX, 975 (rxcons & ~SF_CQ_CONSIDX_RXQ1) | cmpconsidx); 976 csr_write_4(sc, SF_RXDQ_PTR_Q1, 977 (rxprod & ~SF_RXDQ_PRODIDX) | bufprodidx); 978 979 return; 980 } 981 982 /* 983 * Read the transmit status from the completion queue and release 984 * mbufs. Note that the buffer descriptor index in the completion 985 * descriptor is an offset from the start of the transmit buffer 986 * descriptor list in bytes. This is important because the manual 987 * gives the impression that it should match the producer/consumer 988 * index, which is the offset in 8 byte blocks. 989 */ 990 static void 991 sf_txeof(struct sf_softc *sc) 992 { 993 int txcons, cmpprodidx, cmpconsidx; 994 struct sf_tx_cmpdesc_type1 *cur_cmp; 995 struct sf_tx_bufdesc_type0 *cur_tx; 996 struct ifnet *ifp; 997 998 ifp = &sc->arpcom.ac_if; 999 1000 txcons = csr_read_4(sc, SF_CQ_CONSIDX); 1001 cmpprodidx = SF_IDX_HI(csr_read_4(sc, SF_CQ_PRODIDX)); 1002 cmpconsidx = SF_IDX_HI(txcons); 1003 1004 while (cmpconsidx != cmpprodidx) { 1005 cur_cmp = &sc->sf_ldata->sf_tx_clist[cmpconsidx]; 1006 cur_tx = &sc->sf_ldata->sf_tx_dlist[cur_cmp->sf_index >> 7]; 1007 1008 if (cur_cmp->sf_txstat & SF_TXSTAT_TX_OK) 1009 ifp->if_opackets++; 1010 else { 1011 if (cur_cmp->sf_txstat & SF_TXSTAT_TX_UNDERRUN) 1012 sf_txthresh_adjust(sc); 1013 ifp->if_oerrors++; 1014 } 1015 1016 sc->sf_tx_cnt--; 1017 if (cur_tx->sf_mbuf != NULL) { 1018 m_freem(cur_tx->sf_mbuf); 1019 cur_tx->sf_mbuf = NULL; 1020 } else 1021 break; 1022 SF_INC(cmpconsidx, SF_TX_CLIST_CNT); 1023 } 1024 1025 ifp->if_timer = 0; 1026 ifp->if_flags &= ~IFF_OACTIVE; 1027 1028 csr_write_4(sc, SF_CQ_CONSIDX, 1029 (txcons & ~SF_CQ_CONSIDX_TXQ) | 1030 ((cmpconsidx << 16) & 0xFFFF0000)); 1031 1032 return; 1033 } 1034 1035 static void 1036 sf_txthresh_adjust(struct sf_softc *sc) 1037 { 1038 u_int32_t txfctl; 1039 u_int8_t txthresh; 1040 1041 txfctl = csr_read_4(sc, SF_TX_FRAMCTL); 1042 txthresh = txfctl & SF_TXFRMCTL_TXTHRESH; 1043 if (txthresh < 0xFF) { 1044 txthresh++; 1045 txfctl &= ~SF_TXFRMCTL_TXTHRESH; 1046 txfctl |= txthresh; 1047 #ifdef DIAGNOSTIC 1048 kprintf("sf%d: tx underrun, increasing " 1049 "tx threshold to %d bytes\n", 1050 sc->sf_unit, txthresh * 4); 1051 #endif 1052 csr_write_4(sc, SF_TX_FRAMCTL, txfctl); 1053 } 1054 1055 return; 1056 } 1057 1058 static void 1059 sf_intr(void *arg) 1060 { 1061 struct sf_softc *sc; 1062 struct ifnet *ifp; 1063 u_int32_t status; 1064 1065 sc = arg; 1066 ifp = &sc->arpcom.ac_if; 1067 1068 if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED)) 1069 return; 1070 1071 /* Disable interrupts. */ 1072 csr_write_4(sc, SF_IMR, 0x00000000); 1073 1074 for (;;) { 1075 status = csr_read_4(sc, SF_ISR); 1076 if (status) 1077 csr_write_4(sc, SF_ISR, status); 1078 1079 if (!(status & SF_INTRS)) 1080 break; 1081 1082 if (status & SF_ISR_RXDQ1_DMADONE) 1083 sf_rxeof(sc); 1084 1085 if (status & SF_ISR_TX_TXDONE || 1086 status & SF_ISR_TX_DMADONE || 1087 status & SF_ISR_TX_QUEUEDONE) 1088 sf_txeof(sc); 1089 1090 if (status & SF_ISR_TX_LOFIFO) 1091 sf_txthresh_adjust(sc); 1092 1093 if (status & SF_ISR_ABNORMALINTR) { 1094 if (status & SF_ISR_STATSOFLOW) { 1095 callout_stop(&sc->sf_stat_timer); 1096 sf_stats_update(sc); 1097 } else 1098 sf_init(sc); 1099 } 1100 } 1101 1102 /* Re-enable interrupts. */ 1103 csr_write_4(sc, SF_IMR, SF_INTRS); 1104 1105 if (!ifq_is_empty(&ifp->if_snd)) 1106 sf_start(ifp); 1107 1108 return; 1109 } 1110 1111 static void 1112 sf_init(void *xsc) 1113 { 1114 struct sf_softc *sc = xsc; 1115 struct ifnet *ifp = &sc->arpcom.ac_if; 1116 int i; 1117 1118 sf_stop(sc); 1119 sf_reset(sc); 1120 1121 /* Init all the receive filter registers */ 1122 for (i = SF_RXFILT_PERFECT_BASE; 1123 i < (SF_RXFILT_HASH_MAX + 1); i += 4) 1124 csr_write_4(sc, i, 0); 1125 1126 /* Empty stats counter registers. */ 1127 for (i = 0; i < sizeof(struct sf_stats)/sizeof(u_int32_t); i++) 1128 csr_write_4(sc, SF_STATS_BASE + 1129 (i + sizeof(u_int32_t)), 0); 1130 1131 /* Init our MAC address */ 1132 csr_write_4(sc, SF_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1133 csr_write_4(sc, SF_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1134 sf_setperf(sc, 0, (caddr_t)&sc->arpcom.ac_enaddr); 1135 1136 if (sf_init_rx_ring(sc) == ENOBUFS) { 1137 kprintf("sf%d: initialization failed: no " 1138 "memory for rx buffers\n", sc->sf_unit); 1139 return; 1140 } 1141 1142 sf_init_tx_ring(sc); 1143 1144 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL|SF_HASHMODE_WITHVLAN); 1145 1146 /* If we want promiscuous mode, set the allframes bit. */ 1147 if (ifp->if_flags & IFF_PROMISC) { 1148 SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); 1149 } else { 1150 SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); 1151 } 1152 1153 if (ifp->if_flags & IFF_BROADCAST) { 1154 SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_BROAD); 1155 } else { 1156 SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_BROAD); 1157 } 1158 1159 /* 1160 * Load the multicast filter. 1161 */ 1162 sf_setmulti(sc); 1163 1164 /* Init the completion queue indexes */ 1165 csr_write_4(sc, SF_CQ_CONSIDX, 0); 1166 csr_write_4(sc, SF_CQ_PRODIDX, 0); 1167 1168 /* Init the RX completion queue */ 1169 csr_write_4(sc, SF_RXCQ_CTL_1, 1170 vtophys(sc->sf_ldata->sf_rx_clist) & SF_RXCQ_ADDR); 1171 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_3); 1172 1173 /* Init RX DMA control. */ 1174 SF_SETBIT(sc, SF_RXDMA_CTL, SF_RXDMA_REPORTBADPKTS); 1175 1176 /* Init the RX buffer descriptor queue. */ 1177 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 1178 vtophys(sc->sf_ldata->sf_rx_dlist_big)); 1179 csr_write_4(sc, SF_RXDQ_CTL_1, (MCLBYTES << 16) | SF_DESCSPACE_16BYTES); 1180 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); 1181 1182 /* Init the TX completion queue */ 1183 csr_write_4(sc, SF_TXCQ_CTL, 1184 vtophys(sc->sf_ldata->sf_tx_clist) & SF_RXCQ_ADDR); 1185 1186 /* Init the TX buffer descriptor queue. */ 1187 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 1188 vtophys(sc->sf_ldata->sf_tx_dlist)); 1189 SF_SETBIT(sc, SF_TX_FRAMCTL, SF_TXFRMCTL_CPLAFTERTX); 1190 csr_write_4(sc, SF_TXDQ_CTL, 1191 SF_TXBUFDESC_TYPE0|SF_TXMINSPACE_128BYTES|SF_TXSKIPLEN_8BYTES); 1192 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_NODMACMP); 1193 1194 /* Enable autopadding of short TX frames. */ 1195 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); 1196 1197 /* Enable interrupts. */ 1198 csr_write_4(sc, SF_IMR, SF_INTRS); 1199 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); 1200 1201 /* Enable the RX and TX engines. */ 1202 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RX_ENB|SF_ETHCTL_RXDMA_ENB); 1203 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TX_ENB|SF_ETHCTL_TXDMA_ENB); 1204 1205 /*mii_mediachg(mii);*/ 1206 sf_ifmedia_upd(ifp); 1207 1208 ifp->if_flags |= IFF_RUNNING; 1209 ifp->if_flags &= ~IFF_OACTIVE; 1210 1211 callout_reset(&sc->sf_stat_timer, hz, sf_stats_update, sc); 1212 } 1213 1214 static int 1215 sf_encap(struct sf_softc *sc, struct sf_tx_bufdesc_type0 *c, 1216 struct mbuf *m_head) 1217 { 1218 int frag = 0; 1219 struct sf_frag *f = NULL; 1220 struct mbuf *m; 1221 1222 m = m_head; 1223 1224 for (m = m_head, frag = 0; m != NULL; m = m->m_next) { 1225 if (m->m_len != 0) { 1226 if (frag == SF_MAXFRAGS) 1227 break; 1228 f = &c->sf_frags[frag]; 1229 if (frag == 0) 1230 f->sf_pktlen = m_head->m_pkthdr.len; 1231 f->sf_fraglen = m->m_len; 1232 f->sf_addr = vtophys(mtod(m, vm_offset_t)); 1233 frag++; 1234 } 1235 } 1236 1237 if (m != NULL) { 1238 struct mbuf *m_new = NULL; 1239 1240 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 1241 if (m_new == NULL) { 1242 kprintf("sf%d: no memory for tx list", sc->sf_unit); 1243 return(1); 1244 } 1245 1246 if (m_head->m_pkthdr.len > MHLEN) { 1247 MCLGET(m_new, MB_DONTWAIT); 1248 if (!(m_new->m_flags & M_EXT)) { 1249 m_freem(m_new); 1250 kprintf("sf%d: no memory for tx list", 1251 sc->sf_unit); 1252 return(1); 1253 } 1254 } 1255 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1256 mtod(m_new, caddr_t)); 1257 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1258 m_freem(m_head); 1259 m_head = m_new; 1260 f = &c->sf_frags[0]; 1261 f->sf_fraglen = f->sf_pktlen = m_head->m_pkthdr.len; 1262 f->sf_addr = vtophys(mtod(m_head, caddr_t)); 1263 frag = 1; 1264 } 1265 1266 c->sf_mbuf = m_head; 1267 c->sf_id = SF_TX_BUFDESC_ID; 1268 c->sf_fragcnt = frag; 1269 c->sf_intr = 1; 1270 c->sf_caltcp = 0; 1271 c->sf_crcen = 1; 1272 1273 return(0); 1274 } 1275 1276 static void 1277 sf_start(struct ifnet *ifp) 1278 { 1279 struct sf_softc *sc; 1280 struct sf_tx_bufdesc_type0 *cur_tx = NULL; 1281 struct mbuf *m_head = NULL; 1282 int i, txprod; 1283 1284 sc = ifp->if_softc; 1285 1286 if (!sc->sf_link) 1287 return; 1288 1289 if (ifp->if_flags & IFF_OACTIVE) 1290 return; 1291 1292 txprod = csr_read_4(sc, SF_TXDQ_PRODIDX); 1293 i = SF_IDX_HI(txprod) >> 4; 1294 1295 if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) { 1296 kprintf("sf%d: TX ring full, resetting\n", sc->sf_unit); 1297 sf_init(sc); 1298 txprod = csr_read_4(sc, SF_TXDQ_PRODIDX); 1299 i = SF_IDX_HI(txprod) >> 4; 1300 } 1301 1302 while(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf == NULL) { 1303 if (sc->sf_tx_cnt >= (SF_TX_DLIST_CNT - 5)) { 1304 ifp->if_flags |= IFF_OACTIVE; 1305 cur_tx = NULL; 1306 break; 1307 } 1308 m_head = ifq_poll(&ifp->if_snd); 1309 if (m_head == NULL) 1310 break; 1311 1312 cur_tx = &sc->sf_ldata->sf_tx_dlist[i]; 1313 if (sf_encap(sc, cur_tx, m_head)) { 1314 ifp->if_flags |= IFF_OACTIVE; 1315 cur_tx = NULL; 1316 break; 1317 } 1318 ifq_dequeue(&ifp->if_snd, m_head); 1319 BPF_MTAP(ifp, cur_tx->sf_mbuf); 1320 1321 SF_INC(i, SF_TX_DLIST_CNT); 1322 sc->sf_tx_cnt++; 1323 /* 1324 * Don't get the TX DMA queue get too full. 1325 */ 1326 if (sc->sf_tx_cnt > 64) 1327 break; 1328 } 1329 1330 if (cur_tx == NULL) 1331 return; 1332 1333 /* Transmit */ 1334 csr_write_4(sc, SF_TXDQ_PRODIDX, 1335 (txprod & ~SF_TXDQ_PRODIDX_HIPRIO) | 1336 ((i << 20) & 0xFFFF0000)); 1337 1338 ifp->if_timer = 5; 1339 1340 return; 1341 } 1342 1343 static void 1344 sf_stop(struct sf_softc *sc) 1345 { 1346 int i; 1347 struct ifnet *ifp; 1348 1349 ifp = &sc->arpcom.ac_if; 1350 1351 callout_stop(&sc->sf_stat_timer); 1352 1353 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 1354 csr_write_4(sc, SF_CQ_CONSIDX, 0); 1355 csr_write_4(sc, SF_CQ_PRODIDX, 0); 1356 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); 1357 csr_write_4(sc, SF_RXDQ_CTL_1, 0); 1358 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); 1359 csr_write_4(sc, SF_TXCQ_CTL, 0); 1360 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 1361 csr_write_4(sc, SF_TXDQ_CTL, 0); 1362 sf_reset(sc); 1363 1364 sc->sf_link = 0; 1365 1366 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1367 if (sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf != NULL) { 1368 m_freem(sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf); 1369 sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf = NULL; 1370 } 1371 } 1372 1373 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1374 if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) { 1375 m_freem(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf); 1376 sc->sf_ldata->sf_tx_dlist[i].sf_mbuf = NULL; 1377 } 1378 } 1379 1380 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); 1381 1382 return; 1383 } 1384 1385 /* 1386 * Note: it is important that this function not be interrupted. We 1387 * use a two-stage register access scheme: if we are interrupted in 1388 * between setting the indirect address register and reading from the 1389 * indirect data register, the contents of the address register could 1390 * be changed out from under us. 1391 */ 1392 static void 1393 sf_stats_update(void *xsc) 1394 { 1395 struct sf_softc *sc = xsc; 1396 struct ifnet *ifp = &sc->arpcom.ac_if; 1397 struct mii_data *mii = device_get_softc(sc->sf_miibus); 1398 struct sf_stats stats; 1399 u_int32_t *ptr; 1400 int i; 1401 1402 lwkt_serialize_enter(ifp->if_serializer); 1403 1404 ptr = (u_int32_t *)&stats; 1405 for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) 1406 ptr[i] = csr_read_4(sc, SF_STATS_BASE + 1407 (i + sizeof(u_int32_t))); 1408 1409 for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) 1410 csr_write_4(sc, SF_STATS_BASE + 1411 (i + sizeof(u_int32_t)), 0); 1412 1413 ifp->if_collisions += stats.sf_tx_single_colls + 1414 stats.sf_tx_multi_colls + stats.sf_tx_excess_colls; 1415 1416 mii_tick(mii); 1417 if (!sc->sf_link) { 1418 mii_pollstat(mii); 1419 if (mii->mii_media_status & IFM_ACTIVE && 1420 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 1421 sc->sf_link++; 1422 if (!ifq_is_empty(&ifp->if_snd)) 1423 sf_start(ifp); 1424 } 1425 1426 callout_reset(&sc->sf_stat_timer, hz, sf_stats_update, sc); 1427 1428 lwkt_serialize_exit(ifp->if_serializer); 1429 } 1430 1431 static void 1432 sf_watchdog(struct ifnet *ifp) 1433 { 1434 struct sf_softc *sc; 1435 1436 sc = ifp->if_softc; 1437 1438 ifp->if_oerrors++; 1439 kprintf("sf%d: watchdog timeout\n", sc->sf_unit); 1440 1441 sf_stop(sc); 1442 sf_reset(sc); 1443 sf_init(sc); 1444 1445 if (!ifq_is_empty(&ifp->if_snd)) 1446 sf_start(ifp); 1447 1448 return; 1449 } 1450 1451 static void 1452 sf_shutdown(device_t dev) 1453 { 1454 struct sf_softc *sc; 1455 struct ifnet *ifp; 1456 1457 sc = device_get_softc(dev); 1458 ifp = &sc->arpcom.ac_if; 1459 lwkt_serialize_enter(ifp->if_serializer); 1460 sf_stop(sc); 1461 lwkt_serialize_exit(ifp->if_serializer); 1462 1463 return; 1464 } 1465