/*	$OpenBSD: xl.c,v 1.112 2013/12/28 03:35:01 deraadt Exp $	*/

/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
 */

/*
 * 3Com 3c90x Etherlink XL PCI NIC driver
 *
 * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
 * bus-master chips (3c90x cards and embedded controllers) including
 * the following:
 *
 * 3Com 3c900-TPO	10Mbps/RJ-45
 * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
 * 3Com 3c905-TX	10/100Mbps/RJ-45
 * 3Com 3c905-T4	10/100Mbps/RJ-45
 * 3Com 3c900B-TPO	10Mbps/RJ-45
 * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
 * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
 * 3Com 3c900B-FL	10Mbps/Fiber-optic
 * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
 * 3Com 3c905B-TX	10/100Mbps/RJ-45
 * 3Com 3c905B-FL/FX	10/100Mbps/Fiber-optic
 * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
 * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
 * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
 * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
 * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
 * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
 * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
 * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
 * 3Com 3c575TX		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3c575B		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3c575C		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3cxfem656	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3cxfem656b	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3cxfem656c	10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
 * Dell Optiplex GX1 on-board 3c918	10/100Mbps/RJ-45
 * Dell on-board 3c920	10/100Mbps/RJ-45
 * Dell Precision on-board 3c905B	10/100Mbps/RJ-45
 * Dell Latitude laptop docking station embedded 3c905-TX
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The 3c90x series chips use a bus-master DMA interface for transferring
 * packets to and from the controller chip.  Some of the "vortex" cards
 * (3c59x) also supported a bus master mode; however, for those chips
 * you could only DMA packets to/from a contiguous memory buffer.  For
 * transmission this would mean copying the contents of the queued mbuf
 * chain into an mbuf cluster and then DMAing the cluster.  This extra
 * copy would sort of defeat the purpose of the bus master support for
 * any packet that doesn't fit into a single mbuf.
 *
 * By contrast, the 3c90x cards support a fragment-based bus master
 * mode where mbuf chains can be encapsulated using TX descriptors.
 * This is similar to other PCI chips such as the Texas Instruments
 * ThunderLAN and the Intel 82557/82558.
 *
 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
 * bus master chips because they maintain the old PIO interface for
 * backwards compatibility, but starting with the 3c905B and the
 * "cyclone" chips, the compatibility interface has been dropped.
 * Since using bus master DMA is a big win, we use this driver for
 * the PCI "boomerang" chips as well, even though they also work with
 * the "vortex" driver, in order to obtain better performance.
 */
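
/*
 * To make the fragment-based scheme described above concrete, here is
 * a rough sketch of the descriptor layout this driver programs (the
 * real definitions live in xlreg.h; field names below are meant only
 * as an illustration, not as the authoritative declarations):
 *
 *	struct xl_frag {
 *		u_int32_t	xl_addr;	physical addr of one mbuf
 *		u_int32_t	xl_len;		length; XL_LAST_FRAG on last
 *	};
 *	struct xl_list {
 *		u_int32_t	xl_next;	physical addr of next desc
 *		u_int32_t	xl_status;
 *		struct xl_frag	xl_frag[XL_MAXFRAGS];
 *	};
 *
 * Each mbuf in a chain lands in its own fragment slot, so no copy is
 * needed unless the chain has more than XL_MAXFRAGS pieces (see the
 * fallback in xl_encap() below).
 */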

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/proc.h>	/* only for declaration of wakeup() used by vm.h */
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ic/xlreg.h>

/*
 * TX Checksumming is disabled by default for two reasons:
 * - TX Checksumming will occasionally produce corrupt packets
 * - TX Checksumming seems to reduce performance
 *
 * Only 905B/C cards were reported to have this problem; it is possible
 * that later chips _may_ be immune.
 */
#define	XL905B_TXCSUM_BROKEN	1

int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
void xl_stats_update(void *);
int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf *);
void xl_rxeof(struct xl_softc *);
void xl_txeof(struct xl_softc *);
void xl_txeof_90xB(struct xl_softc *);
void xl_txeoc(struct xl_softc *);
int xl_intr(void *);
void xl_start(struct ifnet *);
void xl_start_90xB(struct ifnet *);
int xl_ioctl(struct ifnet *, u_long, caddr_t);
void xl_freetxrx(struct xl_softc *);
void xl_watchdog(struct ifnet *);
int xl_ifmedia_upd(struct ifnet *);
void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int xl_eeprom_wait(struct xl_softc *);
int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
void xl_mii_sync(struct xl_softc *);
void xl_mii_send(struct xl_softc *, u_int32_t, int);
int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);

void xl_setcfg(struct xl_softc *);
void xl_setmode(struct xl_softc *, int);
void xl_iff(struct xl_softc *);
void xl_iff_90x(struct xl_softc *);
void xl_iff_905b(struct xl_softc *);
int xl_list_rx_init(struct xl_softc *);
void xl_fill_rx_ring(struct xl_softc *);
int xl_list_tx_init(struct xl_softc *);
int xl_list_tx_init_90xB(struct xl_softc *);
void xl_wait(struct xl_softc *);
void xl_mediacheck(struct xl_softc *);
void xl_choose_xcvr(struct xl_softc *, int);
#ifdef notdef
void xl_testpacket(struct xl_softc *);
#endif

int xl_miibus_readreg(struct device *, int, int);
void xl_miibus_writereg(struct device *, int, int, int);
void xl_miibus_statchg(struct device *);
#ifndef SMALL_KERNEL
int xl_wol(struct ifnet *, int);
void xl_wol_power(struct xl_softc *);
#endif

int
xl_activate(struct device *self, int act)
{
	struct xl_softc *sc = (struct xl_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING) {
			xl_reset(sc);
			xl_stop(sc);
		}
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		xl_reset(sc);
		if (ifp->if_flags & IFF_UP)
			xl_init(sc);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
#ifndef SMALL_KERNEL
		xl_wol_power(sc);
#endif
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

/*
 * Murphy's law says that it's possible the chip can wedge and
 * the 'command in progress' bit may never clear.  Hence, we wait
 * only a finite amount of time to avoid getting caught in an
 * infinite loop.  Normally this delay routine would be a macro,
 * but it isn't called during normal operation so we can afford
 * to make it a function.
 */
void
xl_wait(struct xl_softc *sc)
{
	int i;

	for (i = 0; i < XL_TIMEOUT; i++) {
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
}

/*
 * MII access routines are provided for adapters with external
 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
 * Note: if you don't perform the MDIO operations just right,
 * it's possible to end up with code that works correctly with
 * some chips/CPUs/processor speeds/bus speeds/etc but not
 * with others.
 */
#define	MII_SET(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
	    CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))

#define	MII_CLR(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
	    CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
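
/*
 * For reference, this is the standard clause 22 MDIO frame that the
 * routines below clock out bit by bit (most significant bit first,
 * after the 32 preamble bits supplied by xl_mii_sync()):
 *
 *	<ST:2> <OP:2> <PHYAD:5> <REGAD:5> <TA:2> <DATA:16>
 *	  01    10 = read
 *	        01 = write
 *
 * On a read the PHY drives the turnaround and data bits, which is why
 * xl_mii_readreg() releases XL_MII_DIR before sampling; on a write the
 * host drives the entire frame.
 */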

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
xl_mii_sync(struct xl_softc *sc)
{
	int i;

	XL_SEL_WIN(4);
	MII_SET(XL_MII_DIR|XL_MII_DATA);

	for (i = 0; i < 32; i++) {
		MII_SET(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
	}
}

/*
 * Clock a series of bits through the MII.
 */
void
xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	XL_SEL_WIN(4);
	MII_CLR(XL_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(XL_MII_DATA);
		} else {
			MII_CLR(XL_MII_DATA);
		}
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
int
xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Select register window 4.
	 */
	XL_SEL_WIN(4);

	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	MII_SET(XL_MII_CLK);

	/* Turn off xmit. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack */
	MII_CLR(XL_MII_CLK);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
	MII_SET(XL_MII_CLK);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			MII_SET(XL_MII_CLK);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
		}
		MII_SET(XL_MII_CLK);
	}

fail:
	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);

	splx(s);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 */
int
xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int s;

	s = splnet();

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/*
	 * Select register window 4.
	 */
	XL_SEL_WIN(4);

	/*
	 * Turn on data output.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	MII_CLR(XL_MII_CLK);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(XL_MII_DIR);

	splx(s);

	return (0);
}

int
xl_miibus_readreg(struct device *self, int phy, int reg)
{
	struct xl_softc *sc = (struct xl_softc *)self;
	struct xl_mii_frame frame;

	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
		return (0);

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	xl_mii_readreg(sc, &frame);

	return (frame.mii_data);
}

void
xl_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct xl_softc *sc = (struct xl_softc *)self;
	struct xl_mii_frame frame;

	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
		return;

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	xl_mii_writereg(sc, &frame);
}

void
xl_miibus_statchg(struct device *self)
{
	struct xl_softc *sc = (struct xl_softc *)self;

	xl_setcfg(sc);

	/* Set ASIC's duplex mode to match the PHY. */
	XL_SEL_WIN(3);
	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	else
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
}

/*
 * The EEPROM is slow: give it time to come ready after issuing
 * it a command.
 */
int
xl_eeprom_wait(struct xl_softc *sc)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
			DELAY(162);
		else
			break;
	}

	if (i == 100) {
		printf("%s: eeprom failed to come ready\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}

	return (0);
}

/*
 * Read a sequence of words from the EEPROM.  Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 */
int
xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int err = 0, i;
	u_int16_t word = 0, *ptr;

#define	EEPROM_5BIT_OFFSET(A)	((((A) << 2) & 0x7F00) | ((A) & 0x003F))
#define	EEPROM_8BIT_OFFSET(A)	((A) & 0x003F)
	/*
	 * WARNING! DANGER!
	 * It's easy to accidentally overwrite the ROM content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
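	/*
	 * A worked example of the offset encoding above: offsets that
	 * fit in the 6-bit window pass through unchanged, e.g.
	 * EEPROM_5BIT_OFFSET(0x23) == 0x0023, while larger offsets
	 * spill into the high bits, e.g. EEPROM_5BIT_OFFSET(0x7F) ==
	 * 0x013F.  EEPROM_8BIT_OFFSET() simply truncates to the low
	 * six bits.
	 */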
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return (1);

	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return (err ? 1 : 0);
}

void
xl_iff(struct xl_softc *sc)
{
	if (sc->xl_type == XL_TYPE_905B)
		xl_iff_905b(sc);
	else
		xl_iff_90x(sc);
}

/*
 * NICs older than the 3c905B have only one multicast option, which
 * is to enable reception of all multicast frames.
 */
void
xl_iff_90x(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	u_int8_t rxfilt;

	XL_SEL_WIN(5);

	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		else
			rxfilt |= XL_RXFILTER_ALLMULTI;
	}

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);

	XL_SEL_WIN(7);
}

/*
 * 3c905B adapters have a hash filter that we can program.
 */
void
xl_iff_905b(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	int h = 0, i;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t rxfilt;

	XL_SEL_WIN(5);

	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
	    XL_RXFILTER_MULTIHASH);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		else
			rxfilt |= XL_RXFILTER_ALLMULTI;
	} else {
		rxfilt |= XL_RXFILTER_MULTIHASH;

		/* first, zot all the existing hash bits */
		for (i = 0; i < XL_HASHFILT_SIZE; i++)
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0x000000FF;
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH |
			    XL_HASH_SET | h);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);

	XL_SEL_WIN(7);
}
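
/*
 * A sketch of the hash programming above: the chip keeps one filter
 * bit per bucket (XL_HASHFILT_SIZE of them), and the bucket index is
 * just the low byte of the big-endian CRC-32 of the destination
 * address:
 *
 *	h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x000000FF;
 *	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH | XL_HASH_SET | h);
 *
 * The filter is therefore approximate: any multicast frame whose
 * address merely aliases onto a set bit is also accepted and left
 * for the stack to discard.
 */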

#ifdef notdef
void
xl_testpacket(struct xl_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int error;

	ifp = &sc->sc_arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	bcopy(&sc->sc_arpcom.ac_enaddr,
	    mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->sc_arpcom.ac_enaddr,
	    mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	mtod(m, struct ether_header *)->ether_type = htons(3);
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
	xl_start(ifp);
}
#endif

void
xl_setcfg(struct xl_softc *sc)
{
	u_int32_t icfg;

	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
	icfg &= ~XL_ICFG_CONNECTOR_MASK;
	if (sc->xl_media & XL_MEDIAOPT_MII ||
	    sc->xl_media & XL_MEDIAOPT_BT4)
		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
	if (sc->xl_media & XL_MEDIAOPT_BTX)
		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);

	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
}

void
xl_setmode(struct xl_softc *sc, int media)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t icfg;
	u_int16_t mediastat;

	XL_SEL_WIN(4);
	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);

	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (IFM_SUBTYPE(media) == IFM_10_T) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_10BT;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
			mediastat |= XL_MEDIASTAT_LINKBEAT|
			    XL_MEDIASTAT_JABGUARD;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
			ifp->if_baudrate = IF_Mbps(100);
			sc->xl_xcvr = XL_XCVR_100BFX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
			mediastat |= XL_MEDIASTAT_LINKBEAT;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		if (IFM_SUBTYPE(media) == IFM_10_5) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
			    XL_MEDIASTAT_JABGUARD);
			mediastat |=
			    ~XL_MEDIASTAT_SQEENB;
		}
		if (IFM_SUBTYPE(media) == IFM_10_FL) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
			    XL_MEDIASTAT_JABGUARD);
			mediastat |= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (IFM_SUBTYPE(media) == IFM_10_2) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_COAX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
			    XL_MEDIASTAT_JABGUARD|
			    XL_MEDIASTAT_SQEENB);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX ||
	    IFM_SUBTYPE(media) == IFM_100_FX) {
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	} else {
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
	}

	if (IFM_SUBTYPE(media) == IFM_10_2)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
	DELAY(800);
	XL_SEL_WIN(7);
}

void
xl_reset(struct xl_softc *sc)
{
	int i;

	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
	    XL_RESETOPT_DISADVFD : 0));

	/*
	 * Pause briefly after issuing the reset command before trying
	 * to access any other registers.  With my 3c575C cardbus card,
	 * failing to do this results in the system locking up while
	 * trying to poll the command busy bit in the status register.
	 */
	DELAY(100000);

	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname);

	/*
	 * Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards.  We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
		    CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
		    ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
		    XL_RESETOPT_INVERT_LED : 0) |
		    ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
		    XL_RESETOPT_INVERT_MII : 0));
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
}

/*
 * This routine is a kludge to work around possible hardware faults
 * or manufacturing defects that can cause the media options register
 * (or reset options register, as it's called for the first generation
 * 3c90x adapters) to return an incorrect result.  I have encountered
 * one Dell Latitude laptop docking station with an integrated 3c905-TX
 * which doesn't have any of the 'mediaopt' bits set.  This screws up
 * the attach routine pretty badly because it doesn't know what media
 * to look for.
 * If we find ourselves in this predicament, this routine
 * will try to guess the media options values and warn the user of a
 * possible manufacturing defect with his adapter/system/whatever.
 */
void
xl_mediacheck(struct xl_softc *sc)
{
	/*
	 * If some of the media options bits are set, assume they are
	 * correct.  If not, try to figure it out down below.
	 * XXX I should check for 10baseFL, but I don't have an adapter
	 * to test with.
	 */
	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
		/*
		 * Check the XCVR value.  If it's not in the normal range
		 * of values, we need to fake it up here.
		 */
		if (sc->xl_xcvr <= XL_XCVR_AUTO)
			return;
		else {
			printf("%s: bogus xcvr value "
			    "in EEPROM (%x)\n", sc->sc_dev.dv_xname,
			    sc->xl_xcvr);
			printf("%s: choosing new default based "
			    "on card type\n", sc->sc_dev.dv_xname);
		}
	} else {
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media & XL_MEDIAOPT_10FL)
			return;
		printf("%s: WARNING: no media options bits set in "
		    "the media options register!!\n", sc->sc_dev.dv_xname);
		printf("%s: this could be a manufacturing defect in "
		    "your adapter or system\n", sc->sc_dev.dv_xname);
		printf("%s: attempting to guess media type; you "
		    "should probably consult your vendor\n",
		    sc->sc_dev.dv_xname);
	}

	xl_choose_xcvr(sc, 1);
}

void
xl_choose_xcvr(struct xl_softc *sc, int verbose)
{
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch (devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing 10BaseT transceiver\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing COMBO (AUI/BNC/TP)\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing TPC (BNC/TP)\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			printf("%s: guessing 10baseFL\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing 100BaseT4/MII\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV: /* 3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 internal\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 plus BNC/AUI\n",
			    sc->sc_dev.dv_xname);
		break;
	default:
		printf("%s: unknown device ID: %x -- "
		    "defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}
}

/*
 * Initialize the transmit descriptors.
 */
int
xl_list_tx_init(struct xl_softc *sc)
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int i;

	cd = &sc->xl_cdata;
	ld = sc->xl_ldata;
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
		if (i == (XL_TX_LIST_CNT - 1))
			cd->xl_tx_chain[i].xl_next = NULL;
		else
			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
	}

	cd->xl_tx_free = &cd->xl_tx_chain[0];
	cd->xl_tx_tail = cd->xl_tx_head = NULL;

	return (0);
}

/*
 * Initialize the transmit descriptors for the 90xB: unlike the
 * original chips, these use a fixed circular ring with producer and
 * consumer indices rather than a free list.
 */
int
xl_list_tx_init_90xB(struct xl_softc *sc)
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int i, next, prev;

	cd = &sc->xl_cdata;
	ld = sc->xl_ldata;
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (i == (XL_TX_LIST_CNT - 1))
			next = 0;
		else
			next = i + 1;
		if (i == 0)
			prev = XL_TX_LIST_CNT - 1;
		else
			prev = i - 1;
		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
		cd->xl_tx_chain[i].xl_phys =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_tx_list[i]);
		cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next];
		cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev];
	}

	bzero(ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);

	cd->xl_tx_prod = 1;
	cd->xl_tx_cons = 1;
	cd->xl_tx_cnt = 0;

	return (0);
}
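
/*
 * The producer/consumer indices initialized above advance with
 * XL_INC(), which (assuming the usual xlreg.h definition) is just a
 * modular increment, roughly:
 *
 *	XL_INC(idx, XL_TX_LIST_CNT);	=> idx = (idx + 1) % XL_TX_LIST_CNT;
 *
 * so xl_tx_prod chases xl_tx_cons around the ring while xl_tx_cnt
 * tracks how many descriptors are currently in flight.
 */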
/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
xl_list_rx_init(struct xl_softc *sc)
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int i, n;
	bus_addr_t next;

	cd = &sc->xl_cdata;
	ld = sc->xl_ldata;

	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		cd->xl_rx_chain[i].xl_ptr =
		    (struct xl_list_onefrag *)&ld->xl_rx_list[i];
		if (i == (XL_RX_LIST_CNT - 1))
			n = 0;
		else
			n = i + 1;
		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n];
		next = sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_rx_list[n]);
		ld->xl_rx_list[i].xl_next = htole32(next);
	}

	cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0];
	cd->xl_rx_cnt = 0;
	xl_fill_rx_ring(sc);
	return (0);
}

void
xl_fill_rx_ring(struct xl_softc *sc)
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;

	cd = &sc->xl_cdata;
	ld = sc->xl_ldata;

	while (cd->xl_rx_cnt < XL_RX_LIST_CNT) {
		if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS)
			break;
		cd->xl_rx_prod = cd->xl_rx_prod->xl_next;
		cd->xl_rx_cnt++;
	}
}

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
int
xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
{
	struct mbuf *m_new = NULL;
	bus_dmamap_t map;

	m_new = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
	if (!m_new)
		return (ENOBUFS);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
	    mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	/* sync the old map, and unload it (if necessary) */
	if (c->map->dm_nsegs != 0) {
		bus_dmamap_sync(sc->sc_dmat, c->map,
		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, c->map);
	}

	map = c->map;
	c->map = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = map;

	/* Force longword alignment for packet payload. */
	m_adj(m_new, ETHER_ALIGN);

	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	c->xl_mbuf = m_new;
	c->xl_ptr->xl_frag.xl_addr =
	    htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
	c->xl_ptr->xl_frag.xl_len =
	    htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
	c->xl_ptr->xl_status = htole32(0);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
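
/*
 * A note on the ETHER_ALIGN adjustment in xl_newbuf() above: shaving
 * two bytes off the front of the cluster makes the frame start at
 * offset 2, so after the 14-byte Ethernet header the IP header lands
 * on a 4-byte boundary (2 + 14 == 16).
 */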
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
xl_rxeof(struct xl_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct xl_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;
	u_int16_t sumflags = 0;

	ifp = &sc->sc_arpcom.ac_if;

again:

	while (sc->xl_cdata.xl_rx_cnt > 0) {
		cur_rx = sc->xl_cdata.xl_rx_cons;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva),
		    sizeof(struct xl_list),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((rxstat = letoh32(sc->xl_cdata.xl_rx_cons->
		    xl_ptr->xl_status)) == 0)
			break;
		m = cur_rx->xl_mbuf;
		cur_rx->xl_mbuf = NULL;
		sc->xl_cdata.xl_rx_cons = cur_rx->xl_next;
		sc->xl_cdata.xl_rx_cnt--;
		total_len = rxstat & XL_RXSTAT_LENMASK;

		/*
		 * Since we have told the chip to allow large frames,
		 * we need to trap giant frame errors in software.  We allow
		 * a little more than the normal frame size to account for
		 * frames with VLAN tags.
		 */
		if (total_len > XL_MAX_FRAMELEN)
			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & XL_RXSTAT_UP_ERROR) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = htole32(0);
			m_freem(m);
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
			printf("%s: bad receive status -- "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = htole32(0);
			m_freem(m);
			continue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
		}
#endif

		if (sc->xl_type == XL_TYPE_905B) {
			if (!(rxstat & XL_RXSTAT_IPCKERR) &&
			    (rxstat & XL_RXSTAT_IPCKOK))
				sumflags |= M_IPV4_CSUM_IN_OK;

			if (!(rxstat & XL_RXSTAT_TCPCKERR) &&
			    (rxstat & XL_RXSTAT_TCPCKOK))
				sumflags |= M_TCP_CSUM_IN_OK;

			if (!(rxstat & XL_RXSTAT_UDPCKERR) &&
			    (rxstat & XL_RXSTAT_UDPCKOK))
				sumflags |= M_UDP_CSUM_IN_OK;

			m->m_pkthdr.csum_flags = sumflags;
		}

		ether_input_mbuf(ifp, m);
	}

	xl_fill_rx_ring(sc);

	/*
	 * Handle the 'end of channel' condition.  When the upload
	 * engine hits the end of the RX ring, it will stall.  This
	 * is our cue to flush the RX ring, reload the uplist pointer
	 * register and unstall the engine.
	 * XXX This is actually a little goofy.  With the ThunderLAN
	 * chip, you get an interrupt when the receiver hits the end
	 * of the receive ring, which tells you exactly when you
	 * need to reload the ring pointer.  Here we have to
	 * fake it.  I'm mad at myself for not being clever enough
	 * to avoid the use of a goto here.
	 */
	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
	    CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
		xl_wait(sc);
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
		xl_fill_rx_ring(sc);
		goto again;
	}
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
void
xl_txeof(struct xl_softc *sc)
{
	struct xl_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.  Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while (sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva),
		    sizeof(struct xl_list),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		ifp->if_opackets++;
		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_t map = cur_tx->map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (cur_tx->xl_mbuf != NULL) {
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Clear the timeout timer. */
		ifp->if_timer = 0;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
		    !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
			    sc->sc_listmap->dm_segs[0].ds_addr +
			    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
			    sc->sc_listkva));
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}
}

void
xl_txeof_90xB(struct xl_softc *sc)
{
	struct xl_chain *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = &sc->sc_arpcom.ac_if;

	idx = sc->xl_cdata.xl_tx_cons;
	while (idx != sc->xl_cdata.xl_tx_prod) {
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		if ((cur_tx->xl_ptr->xl_status &
		    htole32(XL_TXSTAT_DL_COMPLETE)) == 0)
			break;

		if (cur_tx->xl_mbuf != NULL) {
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
			    0, cur_tx->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
		}

		ifp->if_opackets++;

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
	}

	sc->xl_cdata.xl_tx_cons = idx;

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->xl_cdata.xl_tx_cnt == 0)
		ifp->if_timer = 0;
}
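
/*
 * To summarize the two reclaim strategies above: xl_txeof() (for the
 * original 3c900/3c905) has no per-descriptor completion bit, so it
 * only reclaims once XL_DOWNLIST_PTR reads back as zero, i.e. the
 * download engine has gone idle; xl_txeof_90xB() instead walks the
 * ring and checks the XL_TXSTAT_DL_COMPLETE bit that the 90xB/C
 * chips set in each descriptor's status word.
 */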
/*
 * TX 'end of channel' interrupt handler.  Actually, we should
 * only get a 'TX complete' interrupt if there's a transmit error,
 * so this is really the TX error handler.
 */
void
xl_txeoc(struct xl_softc *sc)
{
	u_int8_t txstat;

	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
		    txstat & XL_TXSTATUS_JABBER ||
		    txstat & XL_TXSTATUS_RECLAIM) {
			if (txstat != 0x90) {
				printf("%s: transmission error: %x\n",
				    sc->sc_dev.dv_xname, txstat);
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			if (sc->xl_type == XL_TYPE_905B) {
				if (sc->xl_cdata.xl_tx_cnt) {
					int i;
					struct xl_chain *c;

					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    c->xl_phys);
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
				}
			} else {
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->sc_listmap->dm_segs[0].ds_addr +
					    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
					    sc->sc_listkva));
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
#ifdef notdef
				printf("%s: tx underrun, increasing tx start"
				    " threshold to %d\n", sc->sc_dev.dv_xname,
				    sc->xl_tx_thresh);
#endif
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}
}

int
xl_intr(void *arg)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->sc_arpcom.ac_if;

	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS &&
	    status != 0xFFFF) {
		claimed = 1;

		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (status & XL_STAT_UP_COMPLETE)
			xl_rxeof(sc);

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		if (status & XL_STAT_ADFAIL) {
			xl_reset(sc);
			xl_init(sc);
		}

		if (status & XL_STAT_STATSOFLOW) {
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		(*ifp->if_start)(ifp);

	return (claimed);
}

void
xl_stats_update(void *xsc)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	struct xl_stats xl_stats;
	u_int8_t *p;
	int i;
	struct mii_data *mii = NULL;

	bzero(&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->sc_arpcom.ac_if;
	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
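
	/*
	 * The byte-wide loop above assumes struct xl_stats mirrors the
	 * sixteen window 6 statistics registers in register order;
	 * reading the registers also clears them, which (together with
	 * the BadSSD read below) is what keeps the counters from
	 * overflowing and raising XL_STAT_STATSOFLOW.
	 */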

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision +
	    xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD).  We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	if (mii != NULL && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	XL_SEL_WIN(7);

	if (!sc->xl_stats_no_timeout)
		timeout_add_sec(&sc->xl_stsup_tmo, 1);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
{
	int error, frag, total_len;
	u_int32_t status;
	bus_dmamap_t map;

	map = sc->sc_tx_sparemap;

reload:
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		m_freem(m_head);
		return (1);
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
		if (frag == XL_MAXFRAGS)
			break;
		total_len += map->dm_segs[frag].ds_len;
		c->xl_ptr->xl_frag[frag].xl_addr =
		    htole32(map->dm_segs[frag].ds_addr);
		c->xl_ptr->xl_frag[frag].xl_len =
		    htole32(map->dm_segs[frag].ds_len);
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain.  Copy the
	 * data into an mbuf cluster.  Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
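	/*
	 * (bus_dmamap_load_mbuf() reports the too-many-segments case
	 * as EFBIG, which is the only error let through above, so a
	 * non-zero "error" here means the chain must be linearized
	 * into a single mbuf or cluster and reloaded.)
	 */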
	if (error) {
		struct mbuf *m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			m_freem(m_head);
			return (1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				m_freem(m_head);
				return (1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		goto reload;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (c->map->dm_nsegs != 0) {
		bus_dmamap_sync(sc->sc_dmat, c->map,
		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, c->map);
	}

	c->xl_mbuf = m_head;
	sc->sc_tx_sparemap = c->map;
	c->map = map;
	c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG);
	c->xl_ptr->xl_status = htole32(total_len);
	c->xl_ptr->xl_next = 0;

	if (sc->xl_type == XL_TYPE_905B) {
		status = XL_TXSTAT_RND_DEFEAT;

#ifndef XL905B_TXCSUM_BROKEN
		if (m_head->m_pkthdr.csum_flags) {
			if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
				status |= XL_TXSTAT_IPCKSUM;
			if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				status |= XL_TXSTAT_TCPCKSUM;
			if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				status |= XL_TXSTAT_UDPCKSUM;
		}
#endif
		c->xl_ptr->xl_status = htole32(status);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct xl_list_data, xl_tx_list[0]),
	    sizeof(struct xl_list) * XL_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
xl_start(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot.  If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	while (sc->xl_cdata.xl_tx_free != NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			cur_tx = prev_tx;
			continue;
		}

		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next =
			    sc->sc_listmap->dm_segs[0].ds_addr +
			    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva);
		}
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain.  This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/*
	 * Queue the packets.  If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva);
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
		    htole32(~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva));

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	xl_rxeof(sc);
}

void
xl_start_90xB(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error, idx;

	sc = ifp->if_softc;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain.  This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
xl_init(void *xsc)
{
	struct xl_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s, i;
	struct mii_data *mii = NULL;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	if (mii == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
		    sc->sc_arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	if (xl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		xl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		xl_list_tx_init_90xB(sc);
	else
		xl_list_tx_init(sc);

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network.  The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}
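
	/*
	 * Worked example of the encoding above: we hand the chip
	 * XL_PACKET_SIZE >> 4 (i.e. XL_PACKET_SIZE / 16), the chip
	 * multiplies it back by 16 internally, so the effective
	 * reclaim threshold ends up at (roughly) one full-sized
	 * packet.
	 */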
	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct xl_list_data, xl_rx_list[0]));
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_tx_list[0]));
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/*
	 * Increase packet size to allow reception of 802.1q or ISL packets.
	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
	 * control register. For 3c90xB/C chips, use the RX packet size
	 * register.
	 */
	if (sc->xl_type == XL_TYPE_905B)
		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
	else {
		u_int8_t macctl;
		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
	}

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	sc->xl_stats_no_timeout = 1;
	xl_stats_update(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);

	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >> 2));
	CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	/* Restore state of BMCR */
	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	timeout_add_sec(&sc->xl_stsup_tmo, 1);
}
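
/*
 * Ordering note for xl_init() (sketch): the UPLIST/DOWNLIST pointers
 * are loaded only after the RX/TX resets, since a reset clears them,
 * and the routine finishes by re-arming the one-second stats timer:
 *
 *	timeout_add_sec(&sc->xl_stsup_tmo, 1);
 */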
/*
 * Set media options.
 */
int
xl_ifmedia_upd(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct ifmedia *ifm = NULL;
	struct mii_data *mii = NULL;

	sc = ifp->if_softc;

	if (sc->xl_hasmii)
		mii = &sc->sc_mii;
	if (mii == NULL)
		ifm = &sc->ifmedia;
	else
		ifm = &mii->mii_media;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_10_2:
	case IFM_10_5:
		xl_setmode(sc, ifm->ifm_media);
		return (0);
	default:
		break;
	}

	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		xl_init(sc);
	} else {
		xl_setmode(sc, ifm->ifm_media);
	}

	return (0);
}
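
/*
 * Usage sketch: the handler above and the one below are the callbacks
 * that ifmedia_ioctl() invokes for SIOCSIFMEDIA/SIOCGIFMEDIA; they are
 * registered in xl_attach(), e.g. for the non-MII case:
 *
 *	ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
 */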
/*
 * Report current media status.
 */
void
xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xl_softc *sc;
	u_int32_t icfg;
	u_int16_t status = 0;
	struct mii_data *mii = NULL;

	sc = ifp->if_softc;
	if (sc->xl_hasmii != 0)
		mii = &sc->sc_mii;

	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);

	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
	icfg >>= XL_ICFG_CONNECTOR_BITS;

	ifmr->ifm_active = IFM_ETHER;
	ifmr->ifm_status = IFM_AVALID;

	if ((status & XL_MEDIASTAT_CARRIER) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;

	switch (icfg) {
	case XL_XCVR_10BT:
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
				ifmr->ifm_active |= IFM_FDX;
			else
				ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
		break;
	/*
	 * XXX MII and BTX/AUTO should be separate cases.
	 */
	case XL_XCVR_100BTX:
	case XL_XCVR_AUTO:
	case XL_XCVR_MII:
		if (mii != NULL) {
			mii_pollstat(mii);
			ifmr->ifm_active = mii->mii_media_active;
			ifmr->ifm_status = mii->mii_media_status;
		}
		break;
	case XL_XCVR_100BFX:
		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
		break;
	default:
		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname,
		    icfg);
		break;
	}
}

int
xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;
	struct mii_data *mii = NULL;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xl_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				xl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->xl_hasmii != 0)
			mii = &sc->sc_mii;
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			xl_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
xl_watchdog(struct ifnet *ifp)
{
	struct xl_softc *sc;
	u_int16_t status = 0;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	if (status & XL_MEDIASTAT_CARRIER)
		printf("%s: no carrier - transceiver cable problem?\n",
		    sc->sc_dev.dv_xname);
	xl_txeoc(sc);
	xl_txeof(sc);
	xl_rxeof(sc);
	xl_reset(sc);
	xl_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		(*ifp->if_start)(ifp);
}
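
/*
 * Reclaim pattern used by xl_freetxrx() below (sketch): for each ring
 * slot with a loaded DMA map, finish the sync, unload the map, then
 * free the mbuf, roughly:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_POSTREAD);		(POSTWRITE for TX)
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *	m_freem(m);
 */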
void
xl_freetxrx(struct xl_softc *sc)
{
	bus_dmamap_t map;
	int i;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
			map = sc->xl_cdata.xl_rx_chain[i].map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
		}
	}
	bzero(&sc->xl_ldata->xl_rx_list, sizeof(sc->xl_ldata->xl_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
			map = sc->xl_cdata.xl_tx_chain[i].map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
		}
	}
	bzero(&sc->xl_ldata->xl_tx_list, sizeof(sc->xl_ldata->xl_tx_list));
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
xl_stop(struct xl_softc *sc)
{
	struct ifnet *ifp;

	/* Stop the stats updater. */
	timeout_del(&sc->xl_stsup_tmo);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_timer = 0;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	DELAY(800);

#ifdef foo
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);

	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	xl_freetxrx(sc);
}

#ifndef SMALL_KERNEL
void
xl_wol_power(struct xl_softc *sc)
{
	/*
	 * Re-enable RX and call upper layer WOL power routine
	 * if WOL is enabled.
	 */
	if ((sc->xl_flags & XL_FLAG_WOL) && sc->wol_power) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
		sc->wol_power(sc->wol_power_arg);
	}
}
#endif

void
xl_attach(struct xl_softc *sc)
{
	u_int8_t enaddr[ETHER_ADDR_LEN];
	u_int16_t xcvr[2];
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i, media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	struct ifmedia *ifm;

	i = splnet();
	xl_reset(sc);
	splx(i);
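
	/*
	 * The descriptor-area setup below follows the usual bus_dma(9)
	 * four-step recipe (sketch):
	 *
	 *	bus_dmamem_alloc()  - physical segment for xl_list_data
	 *	bus_dmamem_map()    - kernel mapping (sc_listkva)
	 *	bus_dmamap_create() - a map object for the region
	 *	bus_dmamap_load()   - bus address (dm_segs[0].ds_addr)
	 */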
	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
		printf("\n%s: failed to read station address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	bcopy(enaddr, &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct xl_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1,
	    sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva;

	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->xl_cdata.xl_rx_chain[i].map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->xl_cdata.xl_tx_chain[i].map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
		u_int16_t n;

		XL_SEL_WIN(2);
		n = CSR_READ_2(sc, 12);

		if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
			n |= 0x0010;

		if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
			n |= 0x4000;

		CSR_WRITE_2(sc, 12, n);
	}

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 * Note: my 3c575C cardbus card lies. It returns a value
	 * of 0x1578 for its capabilities word, which is somewhat
	 * nonsensical. Another way to distinguish a 3c90x chip
	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
	 * bit. This will only be set for 3c90x boomerang chips.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;
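
	/*
	 * The 90X/905B decision above steers several later choices
	 * (sketch): xl_list_tx_init() vs. xl_list_tx_init_90xB() in
	 * xl_init(), xl_start() vs. xl_start_90xB() just below, and
	 * the TX reclaim threshold programmed in xl_init().
	 */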
	/* Set the TX start threshold for best performance. */
	sc->xl_tx_thresh = XL_MIN_FRAMELEN;

	timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xl_ioctl;
	if (sc->xl_type == XL_TYPE_905B)
		ifp->if_start = xl_start_90xB;
	else
		ifp->if_start = xl_start;
	ifp->if_watchdog = xl_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	m_clsetwms(ifp, MCLBYTES, 2, XL_RX_LIST_CNT - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef XL905B_TXCSUM_BROKEN
	ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
	    IFCAP_CSUM_UDPv4;
#endif

	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);

	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	xl_mediacheck(sc);

	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		ifmedia_init(&sc->sc_mii.mii_media, 0,
		    xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 1;
		sc->sc_mii.mii_ifp = ifp;
		sc->sc_mii.mii_readreg = xl_miibus_readreg;
		sc->sc_mii.mii_writereg = xl_miibus_writereg;
		sc->sc_mii.mii_statchg = xl_miibus_statchg;
		xl_setcfg(sc);
		mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
		ifm = &sc->sc_mii.mii_media;
	} else {
		ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 0;
		ifm = &sc->ifmedia;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO) {
		xl_choose_xcvr(sc, 0);
		i = splnet();
		xl_reset(sc);
		splx(i);
	}

	if (sc->xl_media & XL_MEDIAOPT_BT) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(ifm,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		ifp->if_baudrate = 100000000;
		ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
	}
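
	/*
	 * Selection sketch for the switch below: fixed-media
	 * transceivers both record the default in "media" and program
	 * it immediately via xl_setmode(); MII-managed media are left
	 * to miibus instead.
	 */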
	/* Choose a default media. */
	switch (sc->xl_xcvr) {
	case XL_XCVR_10BT:
		media = IFM_ETHER|IFM_10_T;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			media = IFM_ETHER|IFM_10_FL;
			xl_setmode(sc, media);
		} else {
			media = IFM_ETHER|IFM_10_5;
			xl_setmode(sc, media);
		}
		break;
	case XL_XCVR_COAX:
		media = IFM_ETHER|IFM_10_2;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus */
		break;
	case XL_XCVR_100BFX:
		media = IFM_ETHER|IFM_100_FX;
		xl_setmode(sc, media);
		break;
	default:
		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname,
		    sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		media = IFM_ETHER | IFM_10_T;
		break;
	}

	if (sc->xl_hasmii == 0)
		ifmedia_set(&sc->ifmedia, media);

	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
		XL_SEL_WIN(0);
		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
	}

#ifndef SMALL_KERNEL
	/* Check availability of WOL. */
	if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_wol = xl_wol;
		xl_wol(ifp, 0);
	}
#endif

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}

int
xl_detach(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	extern void xl_freetxrx(struct xl_softc *);

	/* Unhook our tick handler. */
	timeout_del(&sc->xl_stsup_tmo);

	xl_freetxrx(sc);

	/* Detach all PHYs */
	if (sc->xl_hasmii)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	return (0);
}

#ifndef SMALL_KERNEL
int
xl_wol(struct ifnet *ifp, int enable)
{
	struct xl_softc *sc = ifp->if_softc;

	XL_SEL_WIN(7);
	if (enable) {
		if (!(ifp->if_flags & IFF_RUNNING))
			xl_init(sc);
		CSR_WRITE_2(sc, XL_W7_BM_PME, XL_BM_PME_MAGIC);
		sc->xl_flags |= XL_FLAG_WOL;
	} else {
		CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
		sc->xl_flags &= ~XL_FLAG_WOL;
	}
	return (0);
}
#endif

struct cfdriver xl_cd = {
	0, "xl", DV_IFNET
};
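
/*
 * Attachment note (a sketch; the exact glue lives in the bus-specific
 * front-ends, e.g. the PCI and CardBus attachments): those files
 * supply the cfattach entries and call xl_attach() with a softc whose
 * register tag/handle and DMA tag are already set up.  The cfdriver
 * above only names the device class for autoconf.
 */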