1 /* $OpenBSD: xl.c,v 1.118 2014/11/24 10:33:37 brad Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $ 35 */ 36 37 /* 38 * 3Com 3c90x Etherlink XL PCI NIC driver 39 * 40 * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI 41 * bus-master chips (3c90x cards and embedded controllers) including 42 * the following: 43 * 44 * 3Com 3c900-TPO 10Mbps/RJ-45 45 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC 46 * 3Com 3c905-TX 10/100Mbps/RJ-45 47 * 3Com 3c905-T4 10/100Mbps/RJ-45 48 * 3Com 3c900B-TPO 10Mbps/RJ-45 49 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC 50 * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC 51 * 3Com 3c900B-FL 10Mbps/Fiber-optic 52 * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC 53 * 3Com 3c905B-TX 10/100Mbps/RJ-45 54 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic 55 * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC) 56 * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC) 57 * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC) 58 * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC) 59 * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC) 60 * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane) 61 * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) 62 * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) 63 * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 64 * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 65 * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 66 * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 67 * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 68 * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC) 69 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45 70 * Dell on-board 3c920 10/100Mbps/RJ-45 71 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45 72 * Dell Latitude laptop docking station embedded 3c905-TX 73 * 74 * Written by Bill Paul <wpaul@ctr.columbia.edu> 75 * Electrical Engineering Department 76 * Columbia University, New York City 77 */ 78 79 /* 80 * The 3c90x series chips use a bus-master DMA interface for transferring 81 * packets to and from the controller chip. Some of the "vortex" cards 82 * (3c59x) also supported a bus master mode, however for those chips 83 * you could only DMA packets to/from a contiguous memory buffer. For 84 * transmission this would mean copying the contents of the queued mbuf 85 * chain into an mbuf cluster and then DMAing the cluster. This extra 86 * copy would sort of defeat the purpose of the bus master support for 87 * any packet that doesn't fit into a single mbuf. 88 * 89 * By contrast, the 3c90x cards support a fragment-based bus master 90 * mode where mbuf chains can be encapsulated using TX descriptors. 91 * This is similar to other PCI chips such as the Texas Instruments 92 * ThunderLAN and the Intel 82557/82558. 93 * 94 * The "vortex" driver (if_vx.c) happens to work for the "boomerang" 95 * bus master chips because they maintain the old PIO interface for 96 * backwards compatibility, but starting with the 3c905B and the 97 * "cyclone" chips, the compatibility interface has been dropped. 98 * Since using bus master DMA is a big win, we use this driver to 99 * support the PCI "boomerang" chips even though they work with the 100 * "vortex" driver in order to obtain better performance. 101 */ 102 103 #include "bpfilter.h" 104 105 #include <sys/param.h> 106 #include <sys/systm.h> 107 #include <sys/mbuf.h> 108 #include <sys/protosw.h> 109 #include <sys/socket.h> 110 #include <sys/ioctl.h> 111 #include <sys/errno.h> 112 #include <sys/malloc.h> 113 #include <sys/kernel.h> 114 #include <sys/device.h> 115 116 #include <net/if.h> 117 #include <net/if_dl.h> 118 #include <net/if_types.h> 119 #include <net/if_media.h> 120 121 #ifdef INET 122 #include <netinet/in.h> 123 #include <netinet/if_ether.h> 124 #endif 125 126 #include <dev/mii/mii.h> 127 #include <dev/mii/miivar.h> 128 129 #include <machine/bus.h> 130 131 #if NBPFILTER > 0 132 #include <net/bpf.h> 133 #endif 134 135 #include <dev/ic/xlreg.h> 136 137 /* 138 * TX Checksumming is disabled by default for two reasons: 139 * - TX Checksumming will occasionally produce corrupt packets 140 * - TX Checksumming seems to reduce performance 141 * 142 * Only 905B/C cards were reported to have this problem, it is possible 143 * that later chips _may_ be immune. 144 */ 145 #define XL905B_TXCSUM_BROKEN 1 146 147 int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *); 148 void xl_stats_update(void *); 149 int xl_encap(struct xl_softc *, struct xl_chain *, 150 struct mbuf * ); 151 void xl_rxeof(struct xl_softc *); 152 void xl_txeof(struct xl_softc *); 153 void xl_txeof_90xB(struct xl_softc *); 154 void xl_txeoc(struct xl_softc *); 155 int xl_intr(void *); 156 void xl_start(struct ifnet *); 157 void xl_start_90xB(struct ifnet *); 158 int xl_ioctl(struct ifnet *, u_long, caddr_t); 159 void xl_freetxrx(struct xl_softc *); 160 void xl_watchdog(struct ifnet *); 161 int xl_ifmedia_upd(struct ifnet *); 162 void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *); 163 164 int xl_eeprom_wait(struct xl_softc *); 165 int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int); 166 void xl_mii_sync(struct xl_softc *); 167 void xl_mii_send(struct xl_softc *, u_int32_t, int); 168 int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *); 169 int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *); 170 171 void xl_setcfg(struct xl_softc *); 172 void xl_setmode(struct xl_softc *, int); 173 void xl_iff(struct xl_softc *); 174 void xl_iff_90x(struct xl_softc *); 175 void xl_iff_905b(struct xl_softc *); 176 int xl_list_rx_init(struct xl_softc *); 177 void xl_fill_rx_ring(struct xl_softc *); 178 int xl_list_tx_init(struct xl_softc *); 179 int xl_list_tx_init_90xB(struct xl_softc *); 180 void xl_wait(struct xl_softc *); 181 void xl_mediacheck(struct xl_softc *); 182 void xl_choose_xcvr(struct xl_softc *, int); 183 #ifdef notdef 184 void xl_testpacket(struct xl_softc *); 185 #endif 186 187 int xl_miibus_readreg(struct device *, int, int); 188 void xl_miibus_writereg(struct device *, int, int, int); 189 void xl_miibus_statchg(struct device *); 190 #ifndef SMALL_KERNEL 191 int xl_wol(struct ifnet *, int); 192 void xl_wol_power(struct xl_softc *); 193 #endif 194 195 int 196 xl_activate(struct device *self, int act) 197 { 198 struct xl_softc *sc = (struct xl_softc *)self; 199 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 200 int rv = 0; 201 202 switch (act) { 203 case DVACT_SUSPEND: 204 if (ifp->if_flags & IFF_RUNNING) { 205 xl_reset(sc); 206 xl_stop(sc); 207 } 208 rv = config_activate_children(self, act); 209 break; 210 case DVACT_RESUME: 211 xl_reset(sc); 212 if (ifp->if_flags & IFF_UP) 213 xl_init(sc); 214 break; 215 case DVACT_POWERDOWN: 216 rv = config_activate_children(self, act); 217 #ifndef SMALL_KERNEL 218 xl_wol_power(sc); 219 #endif 220 break; 221 default: 222 rv = config_activate_children(self, act); 223 break; 224 } 225 return (rv); 226 } 227 228 /* 229 * Murphy's law says that it's possible the chip can wedge and 230 * the 'command in progress' bit may never clear. Hence, we wait 231 * only a finite amount of time to avoid getting caught in an 232 * infinite loop. Normally this delay routine would be a macro, 233 * but it isn't called during normal operation so we can afford 234 * to make it a function. 235 */ 236 void 237 xl_wait(struct xl_softc *sc) 238 { 239 int i; 240 241 for (i = 0; i < XL_TIMEOUT; i++) { 242 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) 243 break; 244 } 245 246 if (i == XL_TIMEOUT) 247 printf("%s: command never completed!\n", sc->sc_dev.dv_xname); 248 } 249 250 /* 251 * MII access routines are provided for adapters with external 252 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in 253 * autoneg logic that's faked up to look like a PHY (3c905B-TX). 254 * Note: if you don't perform the MDIO operations just right, 255 * it's possible to end up with code that works correctly with 256 * some chips/CPUs/processor speeds/bus speeds/etc but not 257 * with others. 258 */ 259 #define MII_SET(x) \ 260 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \ 261 CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x)) 262 263 #define MII_CLR(x) \ 264 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \ 265 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x)) 266 267 /* 268 * Sync the PHYs by setting data bit and strobing the clock 32 times. 269 */ 270 void 271 xl_mii_sync(struct xl_softc *sc) 272 { 273 int i; 274 275 XL_SEL_WIN(4); 276 MII_SET(XL_MII_DIR|XL_MII_DATA); 277 278 for (i = 0; i < 32; i++) { 279 MII_SET(XL_MII_CLK); 280 MII_SET(XL_MII_DATA); 281 MII_SET(XL_MII_DATA); 282 MII_CLR(XL_MII_CLK); 283 MII_SET(XL_MII_DATA); 284 MII_SET(XL_MII_DATA); 285 } 286 } 287 288 /* 289 * Clock a series of bits through the MII. 290 */ 291 void 292 xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt) 293 { 294 int i; 295 296 XL_SEL_WIN(4); 297 MII_CLR(XL_MII_CLK); 298 299 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 300 if (bits & i) { 301 MII_SET(XL_MII_DATA); 302 } else { 303 MII_CLR(XL_MII_DATA); 304 } 305 MII_CLR(XL_MII_CLK); 306 MII_SET(XL_MII_CLK); 307 } 308 } 309 310 /* 311 * Read an PHY register through the MII. 312 */ 313 int 314 xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame) 315 { 316 int i, ack, s; 317 318 s = splnet(); 319 320 /* 321 * Set up frame for RX. 322 */ 323 frame->mii_stdelim = XL_MII_STARTDELIM; 324 frame->mii_opcode = XL_MII_READOP; 325 frame->mii_turnaround = 0; 326 frame->mii_data = 0; 327 328 /* 329 * Select register window 4. 330 */ 331 332 XL_SEL_WIN(4); 333 334 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0); 335 /* 336 * Turn on data xmit. 337 */ 338 MII_SET(XL_MII_DIR); 339 340 xl_mii_sync(sc); 341 342 /* 343 * Send command/address info. 344 */ 345 xl_mii_send(sc, frame->mii_stdelim, 2); 346 xl_mii_send(sc, frame->mii_opcode, 2); 347 xl_mii_send(sc, frame->mii_phyaddr, 5); 348 xl_mii_send(sc, frame->mii_regaddr, 5); 349 350 /* Idle bit */ 351 MII_CLR((XL_MII_CLK|XL_MII_DATA)); 352 MII_SET(XL_MII_CLK); 353 354 /* Turn off xmit. */ 355 MII_CLR(XL_MII_DIR); 356 357 /* Check for ack */ 358 MII_CLR(XL_MII_CLK); 359 ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA; 360 MII_SET(XL_MII_CLK); 361 362 /* 363 * Now try reading data bits. If the ack failed, we still 364 * need to clock through 16 cycles to keep the PHY(s) in sync. 365 */ 366 if (ack) { 367 for(i = 0; i < 16; i++) { 368 MII_CLR(XL_MII_CLK); 369 MII_SET(XL_MII_CLK); 370 } 371 goto fail; 372 } 373 374 for (i = 0x8000; i; i >>= 1) { 375 MII_CLR(XL_MII_CLK); 376 if (!ack) { 377 if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA) 378 frame->mii_data |= i; 379 } 380 MII_SET(XL_MII_CLK); 381 } 382 383 fail: 384 385 MII_CLR(XL_MII_CLK); 386 MII_SET(XL_MII_CLK); 387 388 splx(s); 389 390 if (ack) 391 return (1); 392 return (0); 393 } 394 395 /* 396 * Write to a PHY register through the MII. 397 */ 398 int 399 xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame) 400 { 401 int s; 402 403 s = splnet(); 404 405 /* 406 * Set up frame for TX. 407 */ 408 409 frame->mii_stdelim = XL_MII_STARTDELIM; 410 frame->mii_opcode = XL_MII_WRITEOP; 411 frame->mii_turnaround = XL_MII_TURNAROUND; 412 413 /* 414 * Select the window 4. 415 */ 416 XL_SEL_WIN(4); 417 418 /* 419 * Turn on data output. 420 */ 421 MII_SET(XL_MII_DIR); 422 423 xl_mii_sync(sc); 424 425 xl_mii_send(sc, frame->mii_stdelim, 2); 426 xl_mii_send(sc, frame->mii_opcode, 2); 427 xl_mii_send(sc, frame->mii_phyaddr, 5); 428 xl_mii_send(sc, frame->mii_regaddr, 5); 429 xl_mii_send(sc, frame->mii_turnaround, 2); 430 xl_mii_send(sc, frame->mii_data, 16); 431 432 /* Idle bit. */ 433 MII_SET(XL_MII_CLK); 434 MII_CLR(XL_MII_CLK); 435 436 /* 437 * Turn off xmit. 438 */ 439 MII_CLR(XL_MII_DIR); 440 441 splx(s); 442 443 return (0); 444 } 445 446 int 447 xl_miibus_readreg(struct device *self, int phy, int reg) 448 { 449 struct xl_softc *sc = (struct xl_softc *)self; 450 struct xl_mii_frame frame; 451 452 if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24) 453 return (0); 454 455 bzero(&frame, sizeof(frame)); 456 457 frame.mii_phyaddr = phy; 458 frame.mii_regaddr = reg; 459 xl_mii_readreg(sc, &frame); 460 461 return (frame.mii_data); 462 } 463 464 void 465 xl_miibus_writereg(struct device *self, int phy, int reg, int data) 466 { 467 struct xl_softc *sc = (struct xl_softc *)self; 468 struct xl_mii_frame frame; 469 470 if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24) 471 return; 472 473 bzero(&frame, sizeof(frame)); 474 475 frame.mii_phyaddr = phy; 476 frame.mii_regaddr = reg; 477 frame.mii_data = data; 478 479 xl_mii_writereg(sc, &frame); 480 } 481 482 void 483 xl_miibus_statchg(struct device *self) 484 { 485 struct xl_softc *sc = (struct xl_softc *)self; 486 487 xl_setcfg(sc); 488 489 /* Set ASIC's duplex mode to match the PHY. */ 490 XL_SEL_WIN(3); 491 if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX) 492 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); 493 else 494 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, 495 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); 496 } 497 498 /* 499 * The EEPROM is slow: give it time to come ready after issuing 500 * it a command. 501 */ 502 int 503 xl_eeprom_wait(struct xl_softc *sc) 504 { 505 int i; 506 507 for (i = 0; i < 100; i++) { 508 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY) 509 DELAY(162); 510 else 511 break; 512 } 513 514 if (i == 100) { 515 printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname); 516 return (1); 517 } 518 519 return (0); 520 } 521 522 /* 523 * Read a sequence of words from the EEPROM. Note that ethernet address 524 * data is stored in the EEPROM in network byte order. 525 */ 526 int 527 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap) 528 { 529 int err = 0, i; 530 u_int16_t word = 0, *ptr; 531 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F)) 532 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F) 533 /* WARNING! DANGER! 534 * It's easy to accidentally overwrite the rom content! 535 * Note: the 3c575 uses 8bit EEPROM offsets. 536 */ 537 XL_SEL_WIN(0); 538 539 if (xl_eeprom_wait(sc)) 540 return (1); 541 542 if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30) 543 off += 0x30; 544 545 for (i = 0; i < cnt; i++) { 546 if (sc->xl_flags & XL_FLAG_8BITROM) 547 CSR_WRITE_2(sc, XL_W0_EE_CMD, 548 XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i)); 549 else 550 CSR_WRITE_2(sc, XL_W0_EE_CMD, 551 XL_EE_READ | EEPROM_5BIT_OFFSET(off + i)); 552 err = xl_eeprom_wait(sc); 553 if (err) 554 break; 555 word = CSR_READ_2(sc, XL_W0_EE_DATA); 556 ptr = (u_int16_t *)(dest + (i * 2)); 557 if (swap) 558 *ptr = ntohs(word); 559 else 560 *ptr = word; 561 } 562 563 return (err ? 1 : 0); 564 } 565 566 void 567 xl_iff(struct xl_softc *sc) 568 { 569 if (sc->xl_type == XL_TYPE_905B) 570 xl_iff_905b(sc); 571 else 572 xl_iff_90x(sc); 573 } 574 575 /* 576 * NICs older than the 3c905B have only one multicast option, which 577 * is to enable reception of all multicast frames. 578 */ 579 void 580 xl_iff_90x(struct xl_softc *sc) 581 { 582 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 583 struct arpcom *ac = &sc->sc_arpcom; 584 u_int8_t rxfilt; 585 586 XL_SEL_WIN(5); 587 588 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); 589 rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI | 590 XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL); 591 ifp->if_flags &= ~IFF_ALLMULTI; 592 593 /* 594 * Always accept broadcast frames. 595 * Always accept frames destined to our station address. 596 */ 597 rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL; 598 599 if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) { 600 ifp->if_flags |= IFF_ALLMULTI; 601 if (ifp->if_flags & IFF_PROMISC) 602 rxfilt |= XL_RXFILTER_ALLFRAMES; 603 else 604 rxfilt |= XL_RXFILTER_ALLMULTI; 605 } 606 607 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt); 608 609 XL_SEL_WIN(7); 610 } 611 612 /* 613 * 3c905B adapters have a hash filter that we can program. 614 */ 615 void 616 xl_iff_905b(struct xl_softc *sc) 617 { 618 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 619 struct arpcom *ac = &sc->sc_arpcom; 620 int h = 0, i; 621 struct ether_multi *enm; 622 struct ether_multistep step; 623 u_int8_t rxfilt; 624 625 XL_SEL_WIN(5); 626 627 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); 628 rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI | 629 XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL | 630 XL_RXFILTER_MULTIHASH); 631 ifp->if_flags &= ~IFF_ALLMULTI; 632 633 /* 634 * Always accept broadcast frames. 635 * Always accept frames destined to our station address. 636 */ 637 rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL; 638 639 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 640 ifp->if_flags |= IFF_ALLMULTI; 641 if (ifp->if_flags & IFF_PROMISC) 642 rxfilt |= XL_RXFILTER_ALLFRAMES; 643 else 644 rxfilt |= XL_RXFILTER_ALLMULTI; 645 } else { 646 rxfilt |= XL_RXFILTER_MULTIHASH; 647 648 /* first, zot all the existing hash bits */ 649 for (i = 0; i < XL_HASHFILT_SIZE; i++) 650 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i); 651 652 /* now program new ones */ 653 ETHER_FIRST_MULTI(step, ac, enm); 654 while (enm != NULL) { 655 h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 656 0x000000FF; 657 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH | 658 XL_HASH_SET | h); 659 660 ETHER_NEXT_MULTI(step, enm); 661 } 662 } 663 664 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt); 665 666 XL_SEL_WIN(7); 667 } 668 669 #ifdef notdef 670 void 671 xl_testpacket(struct xl_softc *sc) 672 { 673 struct mbuf *m; 674 struct ifnet *ifp; 675 int error; 676 677 ifp = &sc->sc_arpcom.ac_if; 678 679 MGETHDR(m, M_DONTWAIT, MT_DATA); 680 681 if (m == NULL) 682 return; 683 684 bcopy(&sc->sc_arpcom.ac_enaddr, 685 mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN); 686 bcopy(&sc->sc_arpcom.ac_enaddr, 687 mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN); 688 mtod(m, struct ether_header *)->ether_type = htons(3); 689 mtod(m, unsigned char *)[14] = 0; 690 mtod(m, unsigned char *)[15] = 0; 691 mtod(m, unsigned char *)[16] = 0xE3; 692 m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3; 693 IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error); 694 xl_start(ifp); 695 } 696 #endif 697 698 void 699 xl_setcfg(struct xl_softc *sc) 700 { 701 u_int32_t icfg; 702 703 XL_SEL_WIN(3); 704 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); 705 icfg &= ~XL_ICFG_CONNECTOR_MASK; 706 if (sc->xl_media & XL_MEDIAOPT_MII || 707 sc->xl_media & XL_MEDIAOPT_BT4) 708 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS); 709 if (sc->xl_media & XL_MEDIAOPT_BTX) 710 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS); 711 712 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); 713 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); 714 } 715 716 void 717 xl_setmode(struct xl_softc *sc, int media) 718 { 719 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 720 u_int32_t icfg; 721 u_int16_t mediastat; 722 723 XL_SEL_WIN(4); 724 mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); 725 XL_SEL_WIN(3); 726 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); 727 728 if (sc->xl_media & XL_MEDIAOPT_BT) { 729 if (IFM_SUBTYPE(media) == IFM_10_T) { 730 ifp->if_baudrate = IF_Mbps(10); 731 sc->xl_xcvr = XL_XCVR_10BT; 732 icfg &= ~XL_ICFG_CONNECTOR_MASK; 733 icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS); 734 mediastat |= XL_MEDIASTAT_LINKBEAT| 735 XL_MEDIASTAT_JABGUARD; 736 mediastat &= ~XL_MEDIASTAT_SQEENB; 737 } 738 } 739 740 if (sc->xl_media & XL_MEDIAOPT_BFX) { 741 if (IFM_SUBTYPE(media) == IFM_100_FX) { 742 ifp->if_baudrate = IF_Mbps(100); 743 sc->xl_xcvr = XL_XCVR_100BFX; 744 icfg &= ~XL_ICFG_CONNECTOR_MASK; 745 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS); 746 mediastat |= XL_MEDIASTAT_LINKBEAT; 747 mediastat &= ~XL_MEDIASTAT_SQEENB; 748 } 749 } 750 751 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { 752 if (IFM_SUBTYPE(media) == IFM_10_5) { 753 ifp->if_baudrate = IF_Mbps(10); 754 sc->xl_xcvr = XL_XCVR_AUI; 755 icfg &= ~XL_ICFG_CONNECTOR_MASK; 756 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); 757 mediastat &= ~(XL_MEDIASTAT_LINKBEAT| 758 XL_MEDIASTAT_JABGUARD); 759 mediastat |= ~XL_MEDIASTAT_SQEENB; 760 } 761 if (IFM_SUBTYPE(media) == IFM_10_FL) { 762 ifp->if_baudrate = IF_Mbps(10); 763 sc->xl_xcvr = XL_XCVR_AUI; 764 icfg &= ~XL_ICFG_CONNECTOR_MASK; 765 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); 766 mediastat &= ~(XL_MEDIASTAT_LINKBEAT| 767 XL_MEDIASTAT_JABGUARD); 768 mediastat |= ~XL_MEDIASTAT_SQEENB; 769 } 770 } 771 772 if (sc->xl_media & XL_MEDIAOPT_BNC) { 773 if (IFM_SUBTYPE(media) == IFM_10_2) { 774 ifp->if_baudrate = IF_Mbps(10); 775 sc->xl_xcvr = XL_XCVR_COAX; 776 icfg &= ~XL_ICFG_CONNECTOR_MASK; 777 icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS); 778 mediastat &= ~(XL_MEDIASTAT_LINKBEAT| 779 XL_MEDIASTAT_JABGUARD| 780 XL_MEDIASTAT_SQEENB); 781 } 782 } 783 784 if ((media & IFM_GMASK) == IFM_FDX || 785 IFM_SUBTYPE(media) == IFM_100_FX) { 786 XL_SEL_WIN(3); 787 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); 788 } else { 789 XL_SEL_WIN(3); 790 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, 791 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); 792 } 793 794 if (IFM_SUBTYPE(media) == IFM_10_2) 795 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); 796 else 797 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); 798 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); 799 XL_SEL_WIN(4); 800 CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat); 801 DELAY(800); 802 XL_SEL_WIN(7); 803 } 804 805 void 806 xl_reset(struct xl_softc *sc) 807 { 808 int i; 809 810 XL_SEL_WIN(0); 811 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | 812 ((sc->xl_flags & XL_FLAG_WEIRDRESET) ? 813 XL_RESETOPT_DISADVFD:0)); 814 815 /* 816 * Pause briefly after issuing the reset command before trying 817 * to access any other registers. With my 3c575C cardbus card, 818 * failing to do this results in the system locking up while 819 * trying to poll the command busy bit in the status register. 820 */ 821 DELAY(100000); 822 823 for (i = 0; i < XL_TIMEOUT; i++) { 824 DELAY(10); 825 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) 826 break; 827 } 828 829 if (i == XL_TIMEOUT) 830 printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname); 831 832 /* Note: the RX reset takes an absurd amount of time 833 * on newer versions of the Tornado chips such as those 834 * on the 3c905CX and newer 3c908C cards. We wait an 835 * extra amount of time so that xl_wait() doesn't complain 836 * and annoy the users. 837 */ 838 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); 839 DELAY(100000); 840 xl_wait(sc); 841 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 842 xl_wait(sc); 843 844 if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR || 845 sc->xl_flags & XL_FLAG_INVERT_MII_PWR) { 846 XL_SEL_WIN(2); 847 CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc, 848 XL_W2_RESET_OPTIONS) 849 | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0) 850 | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0) 851 ); 852 } 853 854 /* Wait a little while for the chip to get its brains in order. */ 855 DELAY(100000); 856 } 857 858 /* 859 * This routine is a kludge to work around possible hardware faults 860 * or manufacturing defects that can cause the media options register 861 * (or reset options register, as it's called for the first generation 862 * 3c90x adapters) to return an incorrect result. I have encountered 863 * one Dell Latitude laptop docking station with an integrated 3c905-TX 864 * which doesn't have any of the 'mediaopt' bits set. This screws up 865 * the attach routine pretty badly because it doesn't know what media 866 * to look for. If we find ourselves in this predicament, this routine 867 * will try to guess the media options values and warn the user of a 868 * possible manufacturing defect with his adapter/system/whatever. 869 */ 870 void 871 xl_mediacheck(struct xl_softc *sc) 872 { 873 /* 874 * If some of the media options bits are set, assume they are 875 * correct. If not, try to figure it out down below. 876 * XXX I should check for 10baseFL, but I don't have an adapter 877 * to test with. 878 */ 879 if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) { 880 /* 881 * Check the XCVR value. If it's not in the normal range 882 * of values, we need to fake it up here. 883 */ 884 if (sc->xl_xcvr <= XL_XCVR_AUTO) 885 return; 886 else { 887 printf("%s: bogus xcvr value " 888 "in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr); 889 printf("%s: choosing new default based " 890 "on card type\n", sc->sc_dev.dv_xname); 891 } 892 } else { 893 if (sc->xl_type == XL_TYPE_905B && 894 sc->xl_media & XL_MEDIAOPT_10FL) 895 return; 896 printf("%s: WARNING: no media options bits set in " 897 "the media options register!!\n", sc->sc_dev.dv_xname); 898 printf("%s: this could be a manufacturing defect in " 899 "your adapter or system\n", sc->sc_dev.dv_xname); 900 printf("%s: attempting to guess media type; you " 901 "should probably consult your vendor\n", sc->sc_dev.dv_xname); 902 } 903 904 xl_choose_xcvr(sc, 1); 905 } 906 907 void 908 xl_choose_xcvr(struct xl_softc *sc, int verbose) 909 { 910 u_int16_t devid; 911 912 /* 913 * Read the device ID from the EEPROM. 914 * This is what's loaded into the PCI device ID register, so it has 915 * to be correct otherwise we wouldn't have gotten this far. 916 */ 917 xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0); 918 919 switch(devid) { 920 case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */ 921 case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */ 922 sc->xl_media = XL_MEDIAOPT_BT; 923 sc->xl_xcvr = XL_XCVR_10BT; 924 if (verbose) 925 printf("%s: guessing 10BaseT transceiver\n", 926 sc->sc_dev.dv_xname); 927 break; 928 case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */ 929 case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */ 930 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; 931 sc->xl_xcvr = XL_XCVR_10BT; 932 if (verbose) 933 printf("%s: guessing COMBO (AUI/BNC/TP)\n", 934 sc->sc_dev.dv_xname); 935 break; 936 case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */ 937 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC; 938 sc->xl_xcvr = XL_XCVR_10BT; 939 if (verbose) 940 printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname); 941 break; 942 case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */ 943 sc->xl_media = XL_MEDIAOPT_10FL; 944 sc->xl_xcvr = XL_XCVR_AUI; 945 if (verbose) 946 printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname); 947 break; 948 case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ 949 case TC_DEVICEID_HURRICANE_555: /* 3c555 */ 950 case TC_DEVICEID_HURRICANE_556: /* 3c556 */ 951 case TC_DEVICEID_HURRICANE_556B: /* 3c556B */ 952 case TC_DEVICEID_HURRICANE_575A: /* 3c575TX */ 953 case TC_DEVICEID_HURRICANE_575B: /* 3c575B */ 954 case TC_DEVICEID_HURRICANE_575C: /* 3c575C */ 955 case TC_DEVICEID_HURRICANE_656: /* 3c656 */ 956 case TC_DEVICEID_HURRICANE_656B: /* 3c656B */ 957 case TC_DEVICEID_TORNADO_656C: /* 3c656C */ 958 case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */ 959 sc->xl_media = XL_MEDIAOPT_MII; 960 sc->xl_xcvr = XL_XCVR_MII; 961 if (verbose) 962 printf("%s: guessing MII\n", sc->sc_dev.dv_xname); 963 break; 964 case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */ 965 case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */ 966 sc->xl_media = XL_MEDIAOPT_BT4; 967 sc->xl_xcvr = XL_XCVR_MII; 968 if (verbose) 969 printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname); 970 break; 971 case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */ 972 case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */ 973 case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */ 974 case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */ 975 case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */ 976 case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */ 977 sc->xl_media = XL_MEDIAOPT_BTX; 978 sc->xl_xcvr = XL_XCVR_AUTO; 979 if (verbose) 980 printf("%s: guessing 10/100 internal\n", 981 sc->sc_dev.dv_xname); 982 break; 983 case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */ 984 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; 985 sc->xl_xcvr = XL_XCVR_AUTO; 986 if (verbose) 987 printf("%s: guessing 10/100 plus BNC/AUI\n", 988 sc->sc_dev.dv_xname); 989 break; 990 default: 991 printf("%s: unknown device ID: %x -- " 992 "defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid); 993 sc->xl_media = XL_MEDIAOPT_BT; 994 break; 995 } 996 } 997 998 /* 999 * Initialize the transmit descriptors. 1000 */ 1001 int 1002 xl_list_tx_init(struct xl_softc *sc) 1003 { 1004 struct xl_chain_data *cd; 1005 struct xl_list_data *ld; 1006 int i; 1007 1008 cd = &sc->xl_cdata; 1009 ld = sc->xl_ldata; 1010 for (i = 0; i < XL_TX_LIST_CNT; i++) { 1011 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; 1012 if (i == (XL_TX_LIST_CNT - 1)) 1013 cd->xl_tx_chain[i].xl_next = NULL; 1014 else 1015 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; 1016 } 1017 1018 cd->xl_tx_free = &cd->xl_tx_chain[0]; 1019 cd->xl_tx_tail = cd->xl_tx_head = NULL; 1020 1021 return (0); 1022 } 1023 1024 /* 1025 * Initialize the transmit descriptors. 1026 */ 1027 int 1028 xl_list_tx_init_90xB(struct xl_softc *sc) 1029 { 1030 struct xl_chain_data *cd; 1031 struct xl_list_data *ld; 1032 int i, next, prev; 1033 1034 cd = &sc->xl_cdata; 1035 ld = sc->xl_ldata; 1036 for (i = 0; i < XL_TX_LIST_CNT; i++) { 1037 if (i == (XL_TX_LIST_CNT - 1)) 1038 next = 0; 1039 else 1040 next = i + 1; 1041 if (i == 0) 1042 prev = XL_TX_LIST_CNT - 1; 1043 else 1044 prev = i - 1; 1045 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; 1046 cd->xl_tx_chain[i].xl_phys = 1047 sc->sc_listmap->dm_segs[0].ds_addr + 1048 offsetof(struct xl_list_data, xl_tx_list[i]); 1049 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next]; 1050 cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev]; 1051 } 1052 1053 bzero(ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT); 1054 ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY); 1055 1056 cd->xl_tx_prod = 1; 1057 cd->xl_tx_cons = 1; 1058 cd->xl_tx_cnt = 0; 1059 1060 return (0); 1061 } 1062 1063 /* 1064 * Initialize the RX descriptors and allocate mbufs for them. Note that 1065 * we arrange the descriptors in a closed ring, so that the last descriptor 1066 * points back to the first. 1067 */ 1068 int 1069 xl_list_rx_init(struct xl_softc *sc) 1070 { 1071 struct xl_chain_data *cd; 1072 struct xl_list_data *ld; 1073 int i, n; 1074 bus_addr_t next; 1075 1076 cd = &sc->xl_cdata; 1077 ld = sc->xl_ldata; 1078 1079 for (i = 0; i < XL_RX_LIST_CNT; i++) { 1080 cd->xl_rx_chain[i].xl_ptr = 1081 (struct xl_list_onefrag *)&ld->xl_rx_list[i]; 1082 if (i == (XL_RX_LIST_CNT - 1)) 1083 n = 0; 1084 else 1085 n = i + 1; 1086 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n]; 1087 next = sc->sc_listmap->dm_segs[0].ds_addr + 1088 offsetof(struct xl_list_data, xl_rx_list[n]); 1089 ld->xl_rx_list[i].xl_next = htole32(next); 1090 } 1091 1092 cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0]; 1093 if_rxr_init(&cd->xl_rx_ring, 2, XL_RX_LIST_CNT - 1); 1094 xl_fill_rx_ring(sc); 1095 return (0); 1096 } 1097 1098 void 1099 xl_fill_rx_ring(struct xl_softc *sc) 1100 { 1101 struct xl_chain_data *cd; 1102 u_int slots; 1103 1104 cd = &sc->xl_cdata; 1105 1106 for (slots = if_rxr_get(&cd->xl_rx_ring, XL_RX_LIST_CNT); 1107 slots > 0; slots--) { 1108 if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS) 1109 break; 1110 cd->xl_rx_prod = cd->xl_rx_prod->xl_next; 1111 } 1112 if_rxr_put(&cd->xl_rx_ring, slots); 1113 } 1114 1115 /* 1116 * Initialize an RX descriptor and attach an MBUF cluster. 1117 */ 1118 int 1119 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c) 1120 { 1121 struct mbuf *m_new = NULL; 1122 bus_dmamap_t map; 1123 1124 m_new = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES); 1125 if (!m_new) 1126 return (ENOBUFS); 1127 1128 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1129 if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap, 1130 mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) { 1131 m_freem(m_new); 1132 return (ENOBUFS); 1133 } 1134 1135 /* sync the old map, and unload it (if necessary) */ 1136 if (c->map->dm_nsegs != 0) { 1137 bus_dmamap_sync(sc->sc_dmat, c->map, 1138 0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1139 bus_dmamap_unload(sc->sc_dmat, c->map); 1140 } 1141 1142 map = c->map; 1143 c->map = sc->sc_rx_sparemap; 1144 sc->sc_rx_sparemap = map; 1145 1146 /* Force longword alignment for packet payload. */ 1147 m_adj(m_new, ETHER_ALIGN); 1148 1149 bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize, 1150 BUS_DMASYNC_PREREAD); 1151 1152 c->xl_mbuf = m_new; 1153 c->xl_ptr->xl_frag.xl_addr = 1154 htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN); 1155 c->xl_ptr->xl_frag.xl_len = 1156 htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG); 1157 c->xl_ptr->xl_status = htole32(0); 1158 1159 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1160 ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list), 1161 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1162 1163 return (0); 1164 } 1165 1166 /* 1167 * A frame has been uploaded: pass the resulting mbuf chain up to 1168 * the higher level protocols. 1169 */ 1170 void 1171 xl_rxeof(struct xl_softc *sc) 1172 { 1173 struct mbuf *m; 1174 struct ifnet *ifp; 1175 struct xl_chain_onefrag *cur_rx; 1176 int total_len = 0; 1177 u_int32_t rxstat; 1178 u_int16_t sumflags = 0; 1179 1180 ifp = &sc->sc_arpcom.ac_if; 1181 1182 again: 1183 1184 while (if_rxr_inuse(&sc->xl_cdata.xl_rx_ring) > 0) { 1185 cur_rx = sc->xl_cdata.xl_rx_cons; 1186 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1187 ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva), 1188 sizeof(struct xl_list), 1189 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1190 if ((rxstat = letoh32(sc->xl_cdata.xl_rx_cons->xl_ptr->xl_status)) == 0) 1191 break; 1192 m = cur_rx->xl_mbuf; 1193 cur_rx->xl_mbuf = NULL; 1194 sc->xl_cdata.xl_rx_cons = cur_rx->xl_next; 1195 if_rxr_put(&sc->xl_cdata.xl_rx_ring, 1); 1196 total_len = rxstat & XL_RXSTAT_LENMASK; 1197 1198 /* 1199 * Since we have told the chip to allow large frames, 1200 * we need to trap giant frame errors in software. We allow 1201 * a little more than the normal frame size to account for 1202 * frames with VLAN tags. 1203 */ 1204 if (total_len > XL_MAX_FRAMELEN) 1205 rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE); 1206 1207 /* 1208 * If an error occurs, update stats, clear the 1209 * status word and leave the mbuf cluster in place: 1210 * it should simply get re-used next time this descriptor 1211 * comes up in the ring. 1212 */ 1213 if (rxstat & XL_RXSTAT_UP_ERROR) { 1214 ifp->if_ierrors++; 1215 cur_rx->xl_ptr->xl_status = htole32(0); 1216 m_freem(m); 1217 continue; 1218 } 1219 1220 /* 1221 * If the error bit was not set, the upload complete 1222 * bit should be set which means we have a valid packet. 1223 * If not, something truly strange has happened. 1224 */ 1225 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) { 1226 printf("%s: bad receive status -- " 1227 "packet dropped\n", sc->sc_dev.dv_xname); 1228 ifp->if_ierrors++; 1229 cur_rx->xl_ptr->xl_status = htole32(0); 1230 m_freem(m); 1231 continue; 1232 } 1233 1234 ifp->if_ipackets++; 1235 m->m_pkthdr.rcvif = ifp; 1236 m->m_pkthdr.len = m->m_len = total_len; 1237 #if NBPFILTER > 0 1238 /* 1239 * Handle BPF listeners. Let the BPF user see the packet. 1240 */ 1241 if (ifp->if_bpf) { 1242 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 1243 } 1244 #endif 1245 1246 if (sc->xl_type == XL_TYPE_905B) { 1247 if (!(rxstat & XL_RXSTAT_IPCKERR) && 1248 (rxstat & XL_RXSTAT_IPCKOK)) 1249 sumflags |= M_IPV4_CSUM_IN_OK; 1250 1251 if (!(rxstat & XL_RXSTAT_TCPCKERR) && 1252 (rxstat & XL_RXSTAT_TCPCKOK)) 1253 sumflags |= M_TCP_CSUM_IN_OK; 1254 1255 if (!(rxstat & XL_RXSTAT_UDPCKERR) && 1256 (rxstat & XL_RXSTAT_UDPCKOK)) 1257 sumflags |= M_UDP_CSUM_IN_OK; 1258 1259 m->m_pkthdr.csum_flags = sumflags; 1260 } 1261 1262 ether_input_mbuf(ifp, m); 1263 } 1264 1265 xl_fill_rx_ring(sc); 1266 1267 /* 1268 * Handle the 'end of channel' condition. When the upload 1269 * engine hits the end of the RX ring, it will stall. This 1270 * is our cue to flush the RX ring, reload the uplist pointer 1271 * register and unstall the engine. 1272 * XXX This is actually a little goofy. With the ThunderLAN 1273 * chip, you get an interrupt when the receiver hits the end 1274 * of the receive ring, which tells you exactly when you 1275 * you need to reload the ring pointer. Here we have to 1276 * fake it. I'm mad at myself for not being clever enough 1277 * to avoid the use of a goto here. 1278 */ 1279 if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 || 1280 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) { 1281 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); 1282 xl_wait(sc); 1283 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); 1284 xl_fill_rx_ring(sc); 1285 goto again; 1286 } 1287 } 1288 1289 /* 1290 * A frame was downloaded to the chip. It's safe for us to clean up 1291 * the list buffers. 1292 */ 1293 void 1294 xl_txeof(struct xl_softc *sc) 1295 { 1296 struct xl_chain *cur_tx; 1297 struct ifnet *ifp; 1298 1299 ifp = &sc->sc_arpcom.ac_if; 1300 1301 /* 1302 * Go through our tx list and free mbufs for those 1303 * frames that have been uploaded. Note: the 3c905B 1304 * sets a special bit in the status word to let us 1305 * know that a frame has been downloaded, but the 1306 * original 3c900/3c905 adapters don't do that. 1307 * Consequently, we have to use a different test if 1308 * xl_type != XL_TYPE_905B. 1309 */ 1310 while (sc->xl_cdata.xl_tx_head != NULL) { 1311 cur_tx = sc->xl_cdata.xl_tx_head; 1312 1313 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1314 ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva), 1315 sizeof(struct xl_list), 1316 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1317 1318 if (CSR_READ_4(sc, XL_DOWNLIST_PTR)) 1319 break; 1320 1321 sc->xl_cdata.xl_tx_head = cur_tx->xl_next; 1322 ifp->if_opackets++; 1323 if (cur_tx->map->dm_nsegs != 0) { 1324 bus_dmamap_t map = cur_tx->map; 1325 1326 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1327 BUS_DMASYNC_POSTWRITE); 1328 bus_dmamap_unload(sc->sc_dmat, map); 1329 } 1330 if (cur_tx->xl_mbuf != NULL) { 1331 m_freem(cur_tx->xl_mbuf); 1332 cur_tx->xl_mbuf = NULL; 1333 } 1334 cur_tx->xl_next = sc->xl_cdata.xl_tx_free; 1335 sc->xl_cdata.xl_tx_free = cur_tx; 1336 } 1337 1338 if (sc->xl_cdata.xl_tx_head == NULL) { 1339 ifp->if_flags &= ~IFF_OACTIVE; 1340 /* Clear the timeout timer. */ 1341 ifp->if_timer = 0; 1342 sc->xl_cdata.xl_tx_tail = NULL; 1343 } else { 1344 if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED || 1345 !CSR_READ_4(sc, XL_DOWNLIST_PTR)) { 1346 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, 1347 sc->sc_listmap->dm_segs[0].ds_addr + 1348 ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr - 1349 sc->sc_listkva)); 1350 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 1351 } 1352 } 1353 } 1354 1355 void 1356 xl_txeof_90xB(struct xl_softc *sc) 1357 { 1358 struct xl_chain *cur_tx = NULL; 1359 struct ifnet *ifp; 1360 int idx; 1361 1362 ifp = &sc->sc_arpcom.ac_if; 1363 1364 idx = sc->xl_cdata.xl_tx_cons; 1365 while (idx != sc->xl_cdata.xl_tx_prod) { 1366 1367 cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; 1368 1369 if ((cur_tx->xl_ptr->xl_status & 1370 htole32(XL_TXSTAT_DL_COMPLETE)) == 0) 1371 break; 1372 1373 if (cur_tx->xl_mbuf != NULL) { 1374 m_freem(cur_tx->xl_mbuf); 1375 cur_tx->xl_mbuf = NULL; 1376 } 1377 1378 if (cur_tx->map->dm_nsegs != 0) { 1379 bus_dmamap_sync(sc->sc_dmat, cur_tx->map, 1380 0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1381 bus_dmamap_unload(sc->sc_dmat, cur_tx->map); 1382 } 1383 1384 ifp->if_opackets++; 1385 1386 sc->xl_cdata.xl_tx_cnt--; 1387 XL_INC(idx, XL_TX_LIST_CNT); 1388 } 1389 1390 sc->xl_cdata.xl_tx_cons = idx; 1391 1392 if (cur_tx != NULL) 1393 ifp->if_flags &= ~IFF_OACTIVE; 1394 if (sc->xl_cdata.xl_tx_cnt == 0) 1395 ifp->if_timer = 0; 1396 } 1397 1398 /* 1399 * TX 'end of channel' interrupt handler. Actually, we should 1400 * only get a 'TX complete' interrupt if there's a transmit error, 1401 * so this is really TX error handler. 1402 */ 1403 void 1404 xl_txeoc(struct xl_softc *sc) 1405 { 1406 u_int8_t txstat; 1407 1408 while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) { 1409 if (txstat & XL_TXSTATUS_UNDERRUN || 1410 txstat & XL_TXSTATUS_JABBER || 1411 txstat & XL_TXSTATUS_RECLAIM) { 1412 if (txstat != 0x90) { 1413 printf("%s: transmission error: %x\n", 1414 sc->sc_dev.dv_xname, txstat); 1415 } 1416 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 1417 xl_wait(sc); 1418 if (sc->xl_type == XL_TYPE_905B) { 1419 if (sc->xl_cdata.xl_tx_cnt) { 1420 int i; 1421 struct xl_chain *c; 1422 1423 i = sc->xl_cdata.xl_tx_cons; 1424 c = &sc->xl_cdata.xl_tx_chain[i]; 1425 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, 1426 c->xl_phys); 1427 CSR_WRITE_1(sc, XL_DOWN_POLL, 64); 1428 } 1429 } else { 1430 if (sc->xl_cdata.xl_tx_head != NULL) 1431 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, 1432 sc->sc_listmap->dm_segs[0].ds_addr + 1433 ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr - 1434 sc->sc_listkva)); 1435 } 1436 /* 1437 * Remember to set this for the 1438 * first generation 3c90X chips. 1439 */ 1440 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); 1441 if (txstat & XL_TXSTATUS_UNDERRUN && 1442 sc->xl_tx_thresh < XL_PACKET_SIZE) { 1443 sc->xl_tx_thresh += XL_MIN_FRAMELEN; 1444 #ifdef notdef 1445 printf("%s: tx underrun, increasing tx start" 1446 " threshold to %d\n", sc->sc_dev.dv_xname, 1447 sc->xl_tx_thresh); 1448 #endif 1449 } 1450 CSR_WRITE_2(sc, XL_COMMAND, 1451 XL_CMD_TX_SET_START|sc->xl_tx_thresh); 1452 if (sc->xl_type == XL_TYPE_905B) { 1453 CSR_WRITE_2(sc, XL_COMMAND, 1454 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); 1455 } 1456 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); 1457 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 1458 } else { 1459 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); 1460 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 1461 } 1462 /* 1463 * Write an arbitrary byte to the TX_STATUS register 1464 * to clear this interrupt/error and advance to the next. 1465 */ 1466 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01); 1467 } 1468 } 1469 1470 int 1471 xl_intr(void *arg) 1472 { 1473 struct xl_softc *sc; 1474 struct ifnet *ifp; 1475 u_int16_t status; 1476 int claimed = 0; 1477 1478 sc = arg; 1479 ifp = &sc->sc_arpcom.ac_if; 1480 1481 while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) { 1482 1483 claimed = 1; 1484 1485 CSR_WRITE_2(sc, XL_COMMAND, 1486 XL_CMD_INTR_ACK|(status & XL_INTRS)); 1487 1488 if (sc->intr_ack) 1489 (*sc->intr_ack)(sc); 1490 1491 if (!(ifp->if_flags & IFF_RUNNING)) 1492 return (claimed); 1493 1494 if (status & XL_STAT_UP_COMPLETE) 1495 xl_rxeof(sc); 1496 1497 if (status & XL_STAT_DOWN_COMPLETE) { 1498 if (sc->xl_type == XL_TYPE_905B) 1499 xl_txeof_90xB(sc); 1500 else 1501 xl_txeof(sc); 1502 } 1503 1504 if (status & XL_STAT_TX_COMPLETE) { 1505 ifp->if_oerrors++; 1506 xl_txeoc(sc); 1507 } 1508 1509 if (status & XL_STAT_ADFAIL) { 1510 xl_reset(sc); 1511 xl_init(sc); 1512 } 1513 1514 if (status & XL_STAT_STATSOFLOW) { 1515 sc->xl_stats_no_timeout = 1; 1516 xl_stats_update(sc); 1517 sc->xl_stats_no_timeout = 0; 1518 } 1519 } 1520 1521 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1522 (*ifp->if_start)(ifp); 1523 1524 return (claimed); 1525 } 1526 1527 void 1528 xl_stats_update(void *xsc) 1529 { 1530 struct xl_softc *sc; 1531 struct ifnet *ifp; 1532 struct xl_stats xl_stats; 1533 u_int8_t *p; 1534 int i; 1535 struct mii_data *mii = NULL; 1536 1537 bzero(&xl_stats, sizeof(struct xl_stats)); 1538 1539 sc = xsc; 1540 ifp = &sc->sc_arpcom.ac_if; 1541 if (sc->xl_hasmii) 1542 mii = &sc->sc_mii; 1543 1544 p = (u_int8_t *)&xl_stats; 1545 1546 /* Read all the stats registers. */ 1547 XL_SEL_WIN(6); 1548 1549 for (i = 0; i < 16; i++) 1550 *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i); 1551 1552 ifp->if_ierrors += xl_stats.xl_rx_overrun; 1553 1554 ifp->if_collisions += xl_stats.xl_tx_multi_collision + 1555 xl_stats.xl_tx_single_collision + 1556 xl_stats.xl_tx_late_collision; 1557 1558 /* 1559 * Boomerang and cyclone chips have an extra stats counter 1560 * in window 4 (BadSSD). We have to read this too in order 1561 * to clear out all the stats registers and avoid a statsoflow 1562 * interrupt. 1563 */ 1564 XL_SEL_WIN(4); 1565 CSR_READ_1(sc, XL_W4_BADSSD); 1566 1567 if (mii != NULL && (!sc->xl_stats_no_timeout)) 1568 mii_tick(mii); 1569 1570 XL_SEL_WIN(7); 1571 1572 if (!sc->xl_stats_no_timeout) 1573 timeout_add_sec(&sc->xl_stsup_tmo, 1); 1574 } 1575 1576 /* 1577 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1578 * pointers to the fragment pointers. 1579 */ 1580 int 1581 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head) 1582 { 1583 int error, frag, total_len; 1584 u_int32_t status; 1585 bus_dmamap_t map; 1586 1587 map = sc->sc_tx_sparemap; 1588 1589 reload: 1590 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, 1591 m_head, BUS_DMA_NOWAIT); 1592 1593 if (error && error != EFBIG) { 1594 m_freem(m_head); 1595 return (1); 1596 } 1597 1598 /* 1599 * Start packing the mbufs in this chain into 1600 * the fragment pointers. Stop when we run out 1601 * of fragments or hit the end of the mbuf chain. 1602 */ 1603 for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) { 1604 if (frag == XL_MAXFRAGS) 1605 break; 1606 total_len += map->dm_segs[frag].ds_len; 1607 c->xl_ptr->xl_frag[frag].xl_addr = 1608 htole32(map->dm_segs[frag].ds_addr); 1609 c->xl_ptr->xl_frag[frag].xl_len = 1610 htole32(map->dm_segs[frag].ds_len); 1611 } 1612 1613 /* 1614 * Handle special case: we used up all 63 fragments, 1615 * but we have more mbufs left in the chain. Copy the 1616 * data into an mbuf cluster. Note that we don't 1617 * bother clearing the values in the other fragment 1618 * pointers/counters; it wouldn't gain us anything, 1619 * and would waste cycles. 1620 */ 1621 if (error) { 1622 struct mbuf *m_new = NULL; 1623 1624 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1625 if (m_new == NULL) { 1626 m_freem(m_head); 1627 return (1); 1628 } 1629 if (m_head->m_pkthdr.len > MHLEN) { 1630 MCLGET(m_new, M_DONTWAIT); 1631 if (!(m_new->m_flags & M_EXT)) { 1632 m_freem(m_new); 1633 m_freem(m_head); 1634 return (1); 1635 } 1636 } 1637 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1638 mtod(m_new, caddr_t)); 1639 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1640 m_freem(m_head); 1641 m_head = m_new; 1642 goto reload; 1643 } 1644 1645 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1646 BUS_DMASYNC_PREWRITE); 1647 1648 if (c->map->dm_nsegs != 0) { 1649 bus_dmamap_sync(sc->sc_dmat, c->map, 1650 0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1651 bus_dmamap_unload(sc->sc_dmat, c->map); 1652 } 1653 1654 c->xl_mbuf = m_head; 1655 sc->sc_tx_sparemap = c->map; 1656 c->map = map; 1657 c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG); 1658 c->xl_ptr->xl_status = htole32(total_len); 1659 c->xl_ptr->xl_next = 0; 1660 1661 if (sc->xl_type == XL_TYPE_905B) { 1662 status = XL_TXSTAT_RND_DEFEAT; 1663 1664 #ifndef XL905B_TXCSUM_BROKEN 1665 if (m_head->m_pkthdr.csum_flags) { 1666 if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 1667 status |= XL_TXSTAT_IPCKSUM; 1668 if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) 1669 status |= XL_TXSTAT_TCPCKSUM; 1670 if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) 1671 status |= XL_TXSTAT_UDPCKSUM; 1672 } 1673 #endif 1674 c->xl_ptr->xl_status = htole32(status); 1675 } 1676 1677 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1678 offsetof(struct xl_list_data, xl_tx_list[0]), 1679 sizeof(struct xl_list) * XL_TX_LIST_CNT, 1680 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1681 1682 return (0); 1683 } 1684 1685 /* 1686 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1687 * to the mbuf data regions directly in the transmit lists. We also save a 1688 * copy of the pointers since the transmit list fragment pointers are 1689 * physical addresses. 1690 */ 1691 void 1692 xl_start(struct ifnet *ifp) 1693 { 1694 struct xl_softc *sc; 1695 struct mbuf *m_head = NULL; 1696 struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; 1697 struct xl_chain *prev_tx; 1698 int error; 1699 1700 sc = ifp->if_softc; 1701 1702 /* 1703 * Check for an available queue slot. If there are none, 1704 * punt. 1705 */ 1706 if (sc->xl_cdata.xl_tx_free == NULL) { 1707 xl_txeoc(sc); 1708 xl_txeof(sc); 1709 if (sc->xl_cdata.xl_tx_free == NULL) { 1710 ifp->if_flags |= IFF_OACTIVE; 1711 return; 1712 } 1713 } 1714 1715 start_tx = sc->xl_cdata.xl_tx_free; 1716 1717 while (sc->xl_cdata.xl_tx_free != NULL) { 1718 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1719 if (m_head == NULL) 1720 break; 1721 1722 /* Pick a descriptor off the free list. */ 1723 prev_tx = cur_tx; 1724 cur_tx = sc->xl_cdata.xl_tx_free; 1725 1726 /* Pack the data into the descriptor. */ 1727 error = xl_encap(sc, cur_tx, m_head); 1728 if (error) { 1729 cur_tx = prev_tx; 1730 continue; 1731 } 1732 1733 sc->xl_cdata.xl_tx_free = cur_tx->xl_next; 1734 cur_tx->xl_next = NULL; 1735 1736 /* Chain it together. */ 1737 if (prev != NULL) { 1738 prev->xl_next = cur_tx; 1739 prev->xl_ptr->xl_next = 1740 sc->sc_listmap->dm_segs[0].ds_addr + 1741 ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva); 1742 1743 } 1744 prev = cur_tx; 1745 1746 #if NBPFILTER > 0 1747 /* 1748 * If there's a BPF listener, bounce a copy of this frame 1749 * to him. 1750 */ 1751 if (ifp->if_bpf) 1752 bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf, 1753 BPF_DIRECTION_OUT); 1754 #endif 1755 } 1756 1757 /* 1758 * If there are no packets queued, bail. 1759 */ 1760 if (cur_tx == NULL) 1761 return; 1762 1763 /* 1764 * Place the request for the upload interrupt 1765 * in the last descriptor in the chain. This way, if 1766 * we're chaining several packets at once, we'll only 1767 * get an interrupt once for the whole chain rather than 1768 * once for each packet. 1769 */ 1770 cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); 1771 1772 /* 1773 * Queue the packets. If the TX channel is clear, update 1774 * the downlist pointer register. 1775 */ 1776 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); 1777 xl_wait(sc); 1778 1779 if (sc->xl_cdata.xl_tx_head != NULL) { 1780 sc->xl_cdata.xl_tx_tail->xl_next = start_tx; 1781 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next = 1782 sc->sc_listmap->dm_segs[0].ds_addr + 1783 ((caddr_t)start_tx->xl_ptr - sc->sc_listkva); 1784 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &= 1785 htole32(~XL_TXSTAT_DL_INTR); 1786 sc->xl_cdata.xl_tx_tail = cur_tx; 1787 } else { 1788 sc->xl_cdata.xl_tx_head = start_tx; 1789 sc->xl_cdata.xl_tx_tail = cur_tx; 1790 } 1791 if (!CSR_READ_4(sc, XL_DOWNLIST_PTR)) 1792 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, 1793 sc->sc_listmap->dm_segs[0].ds_addr + 1794 ((caddr_t)start_tx->xl_ptr - sc->sc_listkva)); 1795 1796 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 1797 1798 XL_SEL_WIN(7); 1799 1800 /* 1801 * Set a timeout in case the chip goes out to lunch. 1802 */ 1803 ifp->if_timer = 5; 1804 1805 /* 1806 * XXX Under certain conditions, usually on slower machines 1807 * where interrupts may be dropped, it's possible for the 1808 * adapter to chew up all the buffers in the receive ring 1809 * and stall, without us being able to do anything about it. 1810 * To guard against this, we need to make a pass over the 1811 * RX queue to make sure there aren't any packets pending. 1812 * Doing it here means we can flush the receive ring at the 1813 * same time the chip is DMAing the transmit descriptors we 1814 * just gave it. 1815 * 1816 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm) 1817 * nature of their chips in all their marketing literature; 1818 * we may as well take advantage of it. :) 1819 */ 1820 xl_rxeof(sc); 1821 } 1822 1823 void 1824 xl_start_90xB(struct ifnet *ifp) 1825 { 1826 struct xl_softc *sc; 1827 struct mbuf *m_head = NULL; 1828 struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; 1829 struct xl_chain *prev_tx; 1830 int error, idx; 1831 1832 sc = ifp->if_softc; 1833 1834 if (ifp->if_flags & IFF_OACTIVE) 1835 return; 1836 1837 idx = sc->xl_cdata.xl_tx_prod; 1838 start_tx = &sc->xl_cdata.xl_tx_chain[idx]; 1839 1840 while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) { 1841 1842 if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) { 1843 ifp->if_flags |= IFF_OACTIVE; 1844 break; 1845 } 1846 1847 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1848 if (m_head == NULL) 1849 break; 1850 1851 prev_tx = cur_tx; 1852 cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; 1853 1854 /* Pack the data into the descriptor. */ 1855 error = xl_encap(sc, cur_tx, m_head); 1856 if (error) { 1857 cur_tx = prev_tx; 1858 continue; 1859 } 1860 1861 /* Chain it together. */ 1862 if (prev != NULL) 1863 prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); 1864 prev = cur_tx; 1865 1866 #if NBPFILTER > 0 1867 /* 1868 * If there's a BPF listener, bounce a copy of this frame 1869 * to him. 1870 */ 1871 if (ifp->if_bpf) 1872 bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf, 1873 BPF_DIRECTION_OUT); 1874 #endif 1875 1876 XL_INC(idx, XL_TX_LIST_CNT); 1877 sc->xl_cdata.xl_tx_cnt++; 1878 } 1879 1880 /* 1881 * If there are no packets queued, bail. 1882 */ 1883 if (cur_tx == NULL) 1884 return; 1885 1886 /* 1887 * Place the request for the upload interrupt 1888 * in the last descriptor in the chain. This way, if 1889 * we're chaining several packets at once, we'll only 1890 * get an interrupt once for the whole chain rather than 1891 * once for each packet. 1892 */ 1893 cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); 1894 1895 /* Start transmission */ 1896 sc->xl_cdata.xl_tx_prod = idx; 1897 start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys); 1898 1899 /* 1900 * Set a timeout in case the chip goes out to lunch. 1901 */ 1902 ifp->if_timer = 5; 1903 } 1904 1905 void 1906 xl_init(void *xsc) 1907 { 1908 struct xl_softc *sc = xsc; 1909 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1910 int s, i; 1911 struct mii_data *mii = NULL; 1912 1913 s = splnet(); 1914 1915 /* 1916 * Cancel pending I/O and free all RX/TX buffers. 1917 */ 1918 xl_stop(sc); 1919 1920 if (sc->xl_hasmii) 1921 mii = &sc->sc_mii; 1922 1923 if (mii == NULL) { 1924 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); 1925 xl_wait(sc); 1926 } 1927 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 1928 xl_wait(sc); 1929 DELAY(10000); 1930 1931 /* Init our MAC address */ 1932 XL_SEL_WIN(2); 1933 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1934 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i, 1935 sc->sc_arpcom.ac_enaddr[i]); 1936 } 1937 1938 /* Clear the station mask. */ 1939 for (i = 0; i < 3; i++) 1940 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0); 1941 #ifdef notdef 1942 /* Reset TX and RX. */ 1943 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); 1944 xl_wait(sc); 1945 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 1946 xl_wait(sc); 1947 #endif 1948 /* Init circular RX list. */ 1949 if (xl_list_rx_init(sc) == ENOBUFS) { 1950 printf("%s: initialization failed: no " 1951 "memory for rx buffers\n", sc->sc_dev.dv_xname); 1952 xl_stop(sc); 1953 splx(s); 1954 return; 1955 } 1956 1957 /* Init TX descriptors. */ 1958 if (sc->xl_type == XL_TYPE_905B) 1959 xl_list_tx_init_90xB(sc); 1960 else 1961 xl_list_tx_init(sc); 1962 1963 /* 1964 * Set the TX freethresh value. 1965 * Note that this has no effect on 3c905B "cyclone" 1966 * cards but is required for 3c900/3c905 "boomerang" 1967 * cards in order to enable the download engine. 1968 */ 1969 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); 1970 1971 /* Set the TX start threshold for best performance. */ 1972 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); 1973 1974 /* 1975 * If this is a 3c905B, also set the tx reclaim threshold. 1976 * This helps cut down on the number of tx reclaim errors 1977 * that could happen on a busy network. The chip multiplies 1978 * the register value by 16 to obtain the actual threshold 1979 * in bytes, so we divide by 16 when setting the value here. 1980 * The existing threshold value can be examined by reading 1981 * the register at offset 9 in window 5. 1982 */ 1983 if (sc->xl_type == XL_TYPE_905B) { 1984 CSR_WRITE_2(sc, XL_COMMAND, 1985 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); 1986 } 1987 1988 /* Program promiscuous mode and multicast filters. */ 1989 xl_iff(sc); 1990 1991 /* 1992 * Load the address of the RX list. We have to 1993 * stall the upload engine before we can manipulate 1994 * the uplist pointer register, then unstall it when 1995 * we're finished. We also have to wait for the 1996 * stall command to complete before proceeding. 1997 * Note that we have to do this after any RX resets 1998 * have completed since the uplist register is cleared 1999 * by a reset. 2000 */ 2001 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); 2002 xl_wait(sc); 2003 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr + 2004 offsetof(struct xl_list_data, xl_rx_list[0])); 2005 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); 2006 xl_wait(sc); 2007 2008 if (sc->xl_type == XL_TYPE_905B) { 2009 /* Set polling interval */ 2010 CSR_WRITE_1(sc, XL_DOWN_POLL, 64); 2011 /* Load the address of the TX list */ 2012 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); 2013 xl_wait(sc); 2014 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, 2015 sc->sc_listmap->dm_segs[0].ds_addr + 2016 offsetof(struct xl_list_data, xl_tx_list[0])); 2017 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 2018 xl_wait(sc); 2019 } 2020 2021 /* 2022 * If the coax transceiver is on, make sure to enable 2023 * the DC-DC converter. 2024 */ 2025 XL_SEL_WIN(3); 2026 if (sc->xl_xcvr == XL_XCVR_COAX) 2027 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); 2028 else 2029 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); 2030 2031 /* 2032 * increase packet size to allow reception of 802.1q or ISL packets. 2033 * For the 3c90x chip, set the 'allow large packets' bit in the MAC 2034 * control register. For 3c90xB/C chips, use the RX packet size 2035 * register. 2036 */ 2037 2038 if (sc->xl_type == XL_TYPE_905B) 2039 CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE); 2040 else { 2041 u_int8_t macctl; 2042 macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL); 2043 macctl |= XL_MACCTRL_ALLOW_LARGE_PACK; 2044 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl); 2045 } 2046 2047 /* Clear out the stats counters. */ 2048 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); 2049 sc->xl_stats_no_timeout = 1; 2050 xl_stats_update(sc); 2051 sc->xl_stats_no_timeout = 0; 2052 XL_SEL_WIN(4); 2053 CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE); 2054 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE); 2055 2056 /* 2057 * Enable interrupts. 2058 */ 2059 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF); 2060 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS); 2061 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); 2062 2063 if (sc->intr_ack) 2064 (*sc->intr_ack)(sc); 2065 2066 /* Set the RX early threshold */ 2067 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2)); 2068 CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY); 2069 2070 /* Enable receiver and transmitter. */ 2071 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); 2072 xl_wait(sc); 2073 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); 2074 xl_wait(sc); 2075 2076 /* Restore state of BMCR */ 2077 if (mii != NULL) 2078 mii_mediachg(mii); 2079 2080 /* Select window 7 for normal operations. */ 2081 XL_SEL_WIN(7); 2082 2083 ifp->if_flags |= IFF_RUNNING; 2084 ifp->if_flags &= ~IFF_OACTIVE; 2085 2086 splx(s); 2087 2088 timeout_add_sec(&sc->xl_stsup_tmo, 1); 2089 } 2090 2091 /* 2092 * Set media options. 2093 */ 2094 int 2095 xl_ifmedia_upd(struct ifnet *ifp) 2096 { 2097 struct xl_softc *sc; 2098 struct ifmedia *ifm = NULL; 2099 struct mii_data *mii = NULL; 2100 2101 sc = ifp->if_softc; 2102 2103 if (sc->xl_hasmii) 2104 mii = &sc->sc_mii; 2105 if (mii == NULL) 2106 ifm = &sc->ifmedia; 2107 else 2108 ifm = &mii->mii_media; 2109 2110 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2111 case IFM_100_FX: 2112 case IFM_10_FL: 2113 case IFM_10_2: 2114 case IFM_10_5: 2115 xl_setmode(sc, ifm->ifm_media); 2116 return (0); 2117 break; 2118 default: 2119 break; 2120 } 2121 2122 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX 2123 || sc->xl_media & XL_MEDIAOPT_BT4) { 2124 xl_init(sc); 2125 } else { 2126 xl_setmode(sc, ifm->ifm_media); 2127 } 2128 2129 return (0); 2130 } 2131 2132 /* 2133 * Report current media status. 2134 */ 2135 void 2136 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2137 { 2138 struct xl_softc *sc; 2139 u_int32_t icfg; 2140 u_int16_t status = 0; 2141 struct mii_data *mii = NULL; 2142 2143 sc = ifp->if_softc; 2144 if (sc->xl_hasmii != 0) 2145 mii = &sc->sc_mii; 2146 2147 XL_SEL_WIN(4); 2148 status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); 2149 2150 XL_SEL_WIN(3); 2151 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK; 2152 icfg >>= XL_ICFG_CONNECTOR_BITS; 2153 2154 ifmr->ifm_active = IFM_ETHER; 2155 ifmr->ifm_status = IFM_AVALID; 2156 2157 if ((status & XL_MEDIASTAT_CARRIER) == 0) 2158 ifmr->ifm_status |= IFM_ACTIVE; 2159 2160 switch(icfg) { 2161 case XL_XCVR_10BT: 2162 ifmr->ifm_active = IFM_ETHER|IFM_10_T; 2163 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) 2164 ifmr->ifm_active |= IFM_FDX; 2165 else 2166 ifmr->ifm_active |= IFM_HDX; 2167 break; 2168 case XL_XCVR_AUI: 2169 if (sc->xl_type == XL_TYPE_905B && 2170 sc->xl_media == XL_MEDIAOPT_10FL) { 2171 ifmr->ifm_active = IFM_ETHER|IFM_10_FL; 2172 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) 2173 ifmr->ifm_active |= IFM_FDX; 2174 else 2175 ifmr->ifm_active |= IFM_HDX; 2176 } else 2177 ifmr->ifm_active = IFM_ETHER|IFM_10_5; 2178 break; 2179 case XL_XCVR_COAX: 2180 ifmr->ifm_active = IFM_ETHER|IFM_10_2; 2181 break; 2182 /* 2183 * XXX MII and BTX/AUTO should be separate cases. 2184 */ 2185 2186 case XL_XCVR_100BTX: 2187 case XL_XCVR_AUTO: 2188 case XL_XCVR_MII: 2189 if (mii != NULL) { 2190 mii_pollstat(mii); 2191 ifmr->ifm_active = mii->mii_media_active; 2192 ifmr->ifm_status = mii->mii_media_status; 2193 } 2194 break; 2195 case XL_XCVR_100BFX: 2196 ifmr->ifm_active = IFM_ETHER|IFM_100_FX; 2197 break; 2198 default: 2199 printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg); 2200 break; 2201 } 2202 } 2203 2204 int 2205 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2206 { 2207 struct xl_softc *sc = ifp->if_softc; 2208 struct ifreq *ifr = (struct ifreq *)data; 2209 struct ifaddr *ifa = (struct ifaddr *)data; 2210 int s, error = 0; 2211 struct mii_data *mii = NULL; 2212 2213 s = splnet(); 2214 2215 switch(command) { 2216 case SIOCSIFADDR: 2217 ifp->if_flags |= IFF_UP; 2218 if (!(ifp->if_flags & IFF_RUNNING)) 2219 xl_init(sc); 2220 #ifdef INET 2221 if (ifa->ifa_addr->sa_family == AF_INET) 2222 arp_ifinit(&sc->sc_arpcom, ifa); 2223 #endif 2224 break; 2225 2226 case SIOCSIFFLAGS: 2227 if (ifp->if_flags & IFF_UP) { 2228 if (ifp->if_flags & IFF_RUNNING) 2229 error = ENETRESET; 2230 else 2231 xl_init(sc); 2232 } else { 2233 if (ifp->if_flags & IFF_RUNNING) 2234 xl_stop(sc); 2235 } 2236 break; 2237 2238 case SIOCGIFMEDIA: 2239 case SIOCSIFMEDIA: 2240 if (sc->xl_hasmii != 0) 2241 mii = &sc->sc_mii; 2242 if (mii == NULL) 2243 error = ifmedia_ioctl(ifp, ifr, 2244 &sc->ifmedia, command); 2245 else 2246 error = ifmedia_ioctl(ifp, ifr, 2247 &mii->mii_media, command); 2248 break; 2249 2250 case SIOCGIFRXR: 2251 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data, 2252 NULL, MCLBYTES, &sc->xl_cdata.xl_rx_ring); 2253 break; 2254 2255 default: 2256 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data); 2257 } 2258 2259 if (error == ENETRESET) { 2260 if (ifp->if_flags & IFF_RUNNING) 2261 xl_iff(sc); 2262 error = 0; 2263 } 2264 2265 splx(s); 2266 return (error); 2267 } 2268 2269 void 2270 xl_watchdog(struct ifnet *ifp) 2271 { 2272 struct xl_softc *sc; 2273 u_int16_t status = 0; 2274 2275 sc = ifp->if_softc; 2276 2277 ifp->if_oerrors++; 2278 XL_SEL_WIN(4); 2279 status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); 2280 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 2281 2282 if (status & XL_MEDIASTAT_CARRIER) 2283 printf("%s: no carrier - transceiver cable problem?\n", 2284 sc->sc_dev.dv_xname); 2285 xl_txeoc(sc); 2286 xl_txeof(sc); 2287 xl_rxeof(sc); 2288 xl_reset(sc); 2289 xl_init(sc); 2290 2291 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2292 (*ifp->if_start)(ifp); 2293 } 2294 2295 void 2296 xl_freetxrx(struct xl_softc *sc) 2297 { 2298 bus_dmamap_t map; 2299 int i; 2300 2301 /* 2302 * Free data in the RX lists. 2303 */ 2304 for (i = 0; i < XL_RX_LIST_CNT; i++) { 2305 if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) { 2306 map = sc->xl_cdata.xl_rx_chain[i].map; 2307 2308 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2309 BUS_DMASYNC_POSTREAD); 2310 bus_dmamap_unload(sc->sc_dmat, map); 2311 } 2312 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) { 2313 m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf); 2314 sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL; 2315 } 2316 } 2317 bzero(&sc->xl_ldata->xl_rx_list, sizeof(sc->xl_ldata->xl_rx_list)); 2318 /* 2319 * Free the TX list buffers. 2320 */ 2321 for (i = 0; i < XL_TX_LIST_CNT; i++) { 2322 if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) { 2323 map = sc->xl_cdata.xl_tx_chain[i].map; 2324 2325 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2326 BUS_DMASYNC_POSTWRITE); 2327 bus_dmamap_unload(sc->sc_dmat, map); 2328 } 2329 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) { 2330 m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf); 2331 sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL; 2332 } 2333 } 2334 bzero(&sc->xl_ldata->xl_tx_list, sizeof(sc->xl_ldata->xl_tx_list)); 2335 } 2336 2337 /* 2338 * Stop the adapter and free any mbufs allocated to the 2339 * RX and TX lists. 2340 */ 2341 void 2342 xl_stop(struct xl_softc *sc) 2343 { 2344 struct ifnet *ifp; 2345 2346 /* Stop the stats updater. */ 2347 timeout_del(&sc->xl_stsup_tmo); 2348 2349 ifp = &sc->sc_arpcom.ac_if; 2350 2351 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2352 ifp->if_timer = 0; 2353 2354 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE); 2355 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); 2356 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB); 2357 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD); 2358 xl_wait(sc); 2359 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE); 2360 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); 2361 DELAY(800); 2362 2363 #ifdef foo 2364 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); 2365 xl_wait(sc); 2366 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 2367 xl_wait(sc); 2368 #endif 2369 2370 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH); 2371 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0); 2372 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); 2373 2374 if (sc->intr_ack) 2375 (*sc->intr_ack)(sc); 2376 2377 xl_freetxrx(sc); 2378 } 2379 2380 #ifndef SMALL_KERNEL 2381 void 2382 xl_wol_power(struct xl_softc *sc) 2383 { 2384 /* Re-enable RX and call upper layer WOL power routine 2385 * if WOL is enabled. */ 2386 if ((sc->xl_flags & XL_FLAG_WOL) && sc->wol_power) { 2387 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); 2388 sc->wol_power(sc->wol_power_arg); 2389 } 2390 } 2391 #endif 2392 2393 void 2394 xl_attach(struct xl_softc *sc) 2395 { 2396 u_int8_t enaddr[ETHER_ADDR_LEN]; 2397 u_int16_t xcvr[2]; 2398 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2399 int i, media = IFM_ETHER|IFM_100_TX|IFM_FDX; 2400 struct ifmedia *ifm; 2401 2402 i = splnet(); 2403 xl_reset(sc); 2404 splx(i); 2405 2406 /* 2407 * Get station address from the EEPROM. 2408 */ 2409 if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) { 2410 printf("\n%s: failed to read station address\n", 2411 sc->sc_dev.dv_xname); 2412 return; 2413 } 2414 bcopy(enaddr, &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 2415 2416 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data), 2417 PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg, 2418 BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) { 2419 printf(": can't alloc list mem\n"); 2420 return; 2421 } 2422 if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg, 2423 sizeof(struct xl_list_data), &sc->sc_listkva, 2424 BUS_DMA_NOWAIT) != 0) { 2425 printf(": can't map list mem\n"); 2426 return; 2427 } 2428 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1, 2429 sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT, 2430 &sc->sc_listmap) != 0) { 2431 printf(": can't alloc list map\n"); 2432 return; 2433 } 2434 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva, 2435 sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) { 2436 printf(": can't load list map\n"); 2437 return; 2438 } 2439 sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva; 2440 2441 for (i = 0; i < XL_RX_LIST_CNT; i++) { 2442 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 2443 0, BUS_DMA_NOWAIT, 2444 &sc->xl_cdata.xl_rx_chain[i].map) != 0) { 2445 printf(": can't create rx map\n"); 2446 return; 2447 } 2448 } 2449 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 2450 BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) { 2451 printf(": can't create rx spare map\n"); 2452 return; 2453 } 2454 2455 for (i = 0; i < XL_TX_LIST_CNT; i++) { 2456 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 2457 XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT, 2458 &sc->xl_cdata.xl_tx_chain[i].map) != 0) { 2459 printf(": can't create tx map\n"); 2460 return; 2461 } 2462 } 2463 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3, 2464 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) { 2465 printf(": can't create tx spare map\n"); 2466 return; 2467 } 2468 2469 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 2470 2471 if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) { 2472 u_int16_t n; 2473 2474 XL_SEL_WIN(2); 2475 n = CSR_READ_2(sc, 12); 2476 2477 if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR) 2478 n |= 0x0010; 2479 2480 if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR) 2481 n |= 0x4000; 2482 2483 CSR_WRITE_2(sc, 12, n); 2484 } 2485 2486 /* 2487 * Figure out the card type. 3c905B adapters have the 2488 * 'supportsNoTxLength' bit set in the capabilities 2489 * word in the EEPROM. 2490 * Note: my 3c575C cardbus card lies. It returns a value 2491 * of 0x1578 for its capabilities word, which is somewhat 2492 * nonsensical. Another way to distinguish a 3c90x chip 2493 * from a 3c90xB/C chip is to check for the 'supportsLargePackets' 2494 * bit. This will only be set for 3c90x boomerage chips. 2495 */ 2496 xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0); 2497 if (sc->xl_caps & XL_CAPS_NO_TXLENGTH || 2498 !(sc->xl_caps & XL_CAPS_LARGE_PKTS)) 2499 sc->xl_type = XL_TYPE_905B; 2500 else 2501 sc->xl_type = XL_TYPE_90X; 2502 2503 /* Set the TX start threshold for best performance. */ 2504 sc->xl_tx_thresh = XL_MIN_FRAMELEN; 2505 2506 timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc); 2507 2508 ifp->if_softc = sc; 2509 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2510 ifp->if_ioctl = xl_ioctl; 2511 if (sc->xl_type == XL_TYPE_905B) 2512 ifp->if_start = xl_start_90xB; 2513 else 2514 ifp->if_start = xl_start; 2515 ifp->if_watchdog = xl_watchdog; 2516 ifp->if_baudrate = 10000000; 2517 IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1); 2518 IFQ_SET_READY(&ifp->if_snd); 2519 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 2520 2521 ifp->if_capabilities = IFCAP_VLAN_MTU; 2522 2523 #ifndef XL905B_TXCSUM_BROKEN 2524 ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4| 2525 IFCAP_CSUM_UDPv4; 2526 #endif 2527 2528 XL_SEL_WIN(3); 2529 sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT); 2530 2531 xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0); 2532 sc->xl_xcvr = xcvr[0] | xcvr[1] << 16; 2533 sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK; 2534 sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS; 2535 2536 xl_mediacheck(sc); 2537 2538 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX 2539 || sc->xl_media & XL_MEDIAOPT_BT4) { 2540 ifmedia_init(&sc->sc_mii.mii_media, 0, 2541 xl_ifmedia_upd, xl_ifmedia_sts); 2542 sc->xl_hasmii = 1; 2543 sc->sc_mii.mii_ifp = ifp; 2544 sc->sc_mii.mii_readreg = xl_miibus_readreg; 2545 sc->sc_mii.mii_writereg = xl_miibus_writereg; 2546 sc->sc_mii.mii_statchg = xl_miibus_statchg; 2547 xl_setcfg(sc); 2548 mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff, 2549 MII_PHY_ANY, MII_OFFSET_ANY, 0); 2550 2551 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 2552 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 2553 0, NULL); 2554 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 2555 } 2556 else { 2557 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 2558 } 2559 ifm = &sc->sc_mii.mii_media; 2560 } 2561 else { 2562 ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts); 2563 sc->xl_hasmii = 0; 2564 ifm = &sc->ifmedia; 2565 } 2566 2567 /* 2568 * Sanity check. If the user has selected "auto" and this isn't 2569 * a 10/100 card of some kind, we need to force the transceiver 2570 * type to something sane. 2571 */ 2572 if (sc->xl_xcvr == XL_XCVR_AUTO) { 2573 xl_choose_xcvr(sc, 0); 2574 i = splnet(); 2575 xl_reset(sc); 2576 splx(i); 2577 } 2578 2579 if (sc->xl_media & XL_MEDIAOPT_BT) { 2580 ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL); 2581 ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 2582 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) 2583 ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 2584 } 2585 2586 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { 2587 /* 2588 * Check for a 10baseFL board in disguise. 2589 */ 2590 if (sc->xl_type == XL_TYPE_905B && 2591 sc->xl_media == XL_MEDIAOPT_10FL) { 2592 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL); 2593 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX, 2594 0, NULL); 2595 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) 2596 ifmedia_add(ifm, 2597 IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL); 2598 } else { 2599 ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL); 2600 } 2601 } 2602 2603 if (sc->xl_media & XL_MEDIAOPT_BNC) { 2604 ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL); 2605 } 2606 2607 if (sc->xl_media & XL_MEDIAOPT_BFX) { 2608 ifp->if_baudrate = 100000000; 2609 ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL); 2610 } 2611 2612 /* Choose a default media. */ 2613 switch(sc->xl_xcvr) { 2614 case XL_XCVR_10BT: 2615 media = IFM_ETHER|IFM_10_T; 2616 xl_setmode(sc, media); 2617 break; 2618 case XL_XCVR_AUI: 2619 if (sc->xl_type == XL_TYPE_905B && 2620 sc->xl_media == XL_MEDIAOPT_10FL) { 2621 media = IFM_ETHER|IFM_10_FL; 2622 xl_setmode(sc, media); 2623 } else { 2624 media = IFM_ETHER|IFM_10_5; 2625 xl_setmode(sc, media); 2626 } 2627 break; 2628 case XL_XCVR_COAX: 2629 media = IFM_ETHER|IFM_10_2; 2630 xl_setmode(sc, media); 2631 break; 2632 case XL_XCVR_AUTO: 2633 case XL_XCVR_100BTX: 2634 case XL_XCVR_MII: 2635 /* Chosen by miibus */ 2636 break; 2637 case XL_XCVR_100BFX: 2638 media = IFM_ETHER|IFM_100_FX; 2639 xl_setmode(sc, media); 2640 break; 2641 default: 2642 printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, 2643 sc->xl_xcvr); 2644 /* 2645 * This will probably be wrong, but it prevents 2646 * the ifmedia code from panicking. 2647 */ 2648 media = IFM_ETHER | IFM_10_T; 2649 break; 2650 } 2651 2652 if (sc->xl_hasmii == 0) 2653 ifmedia_set(&sc->ifmedia, media); 2654 2655 if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) { 2656 XL_SEL_WIN(0); 2657 CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS); 2658 } 2659 2660 #ifndef SMALL_KERNEL 2661 /* Check availability of WOL. */ 2662 if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0) { 2663 ifp->if_capabilities |= IFCAP_WOL; 2664 ifp->if_wol = xl_wol; 2665 xl_wol(ifp, 0); 2666 } 2667 #endif 2668 2669 /* 2670 * Call MI attach routines. 2671 */ 2672 if_attach(ifp); 2673 ether_ifattach(ifp); 2674 } 2675 2676 int 2677 xl_detach(struct xl_softc *sc) 2678 { 2679 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2680 extern void xl_freetxrx(struct xl_softc *); 2681 2682 /* Unhook our tick handler. */ 2683 timeout_del(&sc->xl_stsup_tmo); 2684 2685 xl_freetxrx(sc); 2686 2687 /* Detach all PHYs */ 2688 if (sc->xl_hasmii) 2689 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 2690 2691 /* Delete all remaining media. */ 2692 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 2693 2694 ether_ifdetach(ifp); 2695 if_detach(ifp); 2696 2697 return (0); 2698 } 2699 2700 #ifndef SMALL_KERNEL 2701 int 2702 xl_wol(struct ifnet *ifp, int enable) 2703 { 2704 struct xl_softc *sc = ifp->if_softc; 2705 2706 XL_SEL_WIN(7); 2707 if (enable) { 2708 if (!(ifp->if_flags & IFF_RUNNING)) 2709 xl_init(sc); 2710 CSR_WRITE_2(sc, XL_W7_BM_PME, XL_BM_PME_MAGIC); 2711 sc->xl_flags |= XL_FLAG_WOL; 2712 } else { 2713 CSR_WRITE_2(sc, XL_W7_BM_PME, 0); 2714 sc->xl_flags &= ~XL_FLAG_WOL; 2715 } 2716 return (0); 2717 } 2718 #endif 2719 2720 struct cfdriver xl_cd = { 2721 0, "xl", DV_IFNET 2722 }; 2723