1 /* $OpenBSD: re.c,v 1.163 2014/11/24 10:33:37 brad Exp $ */ 2 /* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */ 3 /* 4 * Copyright (c) 1997, 1998-2003 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 /* 36 * Realtek 8139C+/8169/8169S/8110S PCI NIC driver 37 * 38 * Written by Bill Paul <wpaul@windriver.com> 39 * Senior Networking Software Engineer 40 * Wind River Systems 41 */ 42 43 /* 44 * This driver is designed to support Realtek's next generation of 45 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 46 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 47 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 48 * 49 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 50 * with the older 8139 family, however it also supports a special 51 * C+ mode of operation that provides several new performance enhancing 52 * features. These include: 53 * 54 * o Descriptor based DMA mechanism. Each descriptor represents 55 * a single packet fragment. Data buffers may be aligned on 56 * any byte boundary. 57 * 58 * o 64-bit DMA 59 * 60 * o TCP/IP checksum offload for both RX and TX 61 * 62 * o High and normal priority transmit DMA rings 63 * 64 * o VLAN tag insertion and extraction 65 * 66 * o TCP large send (segmentation offload) 67 * 68 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 69 * programming API is fairly straightforward. The RX filtering, EEPROM 70 * access and PHY access is the same as it is on the older 8139 series 71 * chips. 72 * 73 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the 74 * same programming API and feature set as the 8139C+ with the following 75 * differences and additions: 76 * 77 * o 1000Mbps mode 78 * 79 * o Jumbo frames 80 * 81 * o GMII and TBI ports/registers for interfacing with copper 82 * or fiber PHYs 83 * 84 * o RX and TX DMA rings can have up to 1024 descriptors 85 * (the 8139C+ allows a maximum of 64) 86 * 87 * o Slight differences in register layout from the 8139C+ 88 * 89 * The TX start and timer interrupt registers are at different locations 90 * on the 8169 than they are on the 8139C+. 
Also, the status word in the 91 * RX descriptor has a slightly different bit layout. The 8169 does not 92 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 93 * copper gigE PHY. 94 * 95 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 96 * (the 'S' stands for 'single-chip'). These devices have the same 97 * programming API as the older 8169, but also have some vendor-specific 98 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 99 * part designed to be pin-compatible with the Realtek 8100 10/100 chip. 100 * 101 * This driver takes advantage of the RX and TX checksum offload and 102 * VLAN tag insertion/extraction features. It also implements TX 103 * interrupt moderation using the timer interrupt registers, which 104 * significantly reduces TX interrupt load. There is also support 105 * for jumbo frames, however the 8169/8169S/8110S can not transmit 106 * jumbo frames larger than 7440, so the max MTU possible with this 107 * driver is 7422 bytes. 
108 */ 109 110 #include "bpfilter.h" 111 #include "vlan.h" 112 113 #include <sys/param.h> 114 #include <sys/endian.h> 115 #include <sys/systm.h> 116 #include <sys/sockio.h> 117 #include <sys/mbuf.h> 118 #include <sys/malloc.h> 119 #include <sys/kernel.h> 120 #include <sys/device.h> 121 #include <sys/timeout.h> 122 #include <sys/socket.h> 123 124 #include <machine/bus.h> 125 126 #include <net/if.h> 127 #include <net/if_dl.h> 128 #include <net/if_media.h> 129 130 #ifdef INET 131 #include <netinet/in.h> 132 #include <netinet/if_ether.h> 133 #endif 134 135 #if NVLAN > 0 136 #include <net/if_types.h> 137 #include <net/if_vlan_var.h> 138 #endif 139 140 #if NBPFILTER > 0 141 #include <net/bpf.h> 142 #endif 143 144 #include <dev/mii/mii.h> 145 #include <dev/mii/miivar.h> 146 147 #include <dev/pci/pcidevs.h> 148 149 #include <dev/ic/rtl81x9reg.h> 150 #include <dev/ic/revar.h> 151 152 #ifdef RE_DEBUG 153 int redebug = 0; 154 #define DPRINTF(x) do { if (redebug) printf x; } while (0) 155 #else 156 #define DPRINTF(x) 157 #endif 158 159 static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t); 160 161 int re_encap(struct rl_softc *, struct mbuf *, int *); 162 163 int re_newbuf(struct rl_softc *); 164 int re_rx_list_init(struct rl_softc *); 165 void re_rx_list_fill(struct rl_softc *); 166 int re_tx_list_init(struct rl_softc *); 167 int re_rxeof(struct rl_softc *); 168 int re_txeof(struct rl_softc *); 169 void re_tick(void *); 170 void re_start(struct ifnet *); 171 int re_ioctl(struct ifnet *, u_long, caddr_t); 172 void re_watchdog(struct ifnet *); 173 int re_ifmedia_upd(struct ifnet *); 174 void re_ifmedia_sts(struct ifnet *, struct ifmediareq *); 175 176 void re_eeprom_putbyte(struct rl_softc *, int); 177 void re_eeprom_getword(struct rl_softc *, int, u_int16_t *); 178 void re_read_eeprom(struct rl_softc *, caddr_t, int, int); 179 180 int re_gmii_readreg(struct device *, int, int); 181 void re_gmii_writereg(struct device *, int, int, int); 182 183 int 
re_miibus_readreg(struct device *, int, int); 184 void re_miibus_writereg(struct device *, int, int, int); 185 void re_miibus_statchg(struct device *); 186 187 void re_iff(struct rl_softc *); 188 189 void re_setup_hw_im(struct rl_softc *); 190 void re_setup_sim_im(struct rl_softc *); 191 void re_disable_hw_im(struct rl_softc *); 192 void re_disable_sim_im(struct rl_softc *); 193 void re_config_imtype(struct rl_softc *, int); 194 void re_setup_intr(struct rl_softc *, int, int); 195 #ifndef SMALL_KERNEL 196 int re_wol(struct ifnet*, int); 197 #endif 198 199 struct cfdriver re_cd = { 200 0, "re", DV_IFNET 201 }; 202 203 #define EE_SET(x) \ 204 CSR_WRITE_1(sc, RL_EECMD, \ 205 CSR_READ_1(sc, RL_EECMD) | x) 206 207 #define EE_CLR(x) \ 208 CSR_WRITE_1(sc, RL_EECMD, \ 209 CSR_READ_1(sc, RL_EECMD) & ~x) 210 211 static const struct re_revision { 212 u_int32_t re_chipid; 213 const char *re_name; 214 } re_revisions[] = { 215 { RL_HWREV_8100, "RTL8100" }, 216 { RL_HWREV_8100E, "RTL8100E" }, 217 { RL_HWREV_8100E_SPIN2, "RTL8100E 2" }, 218 { RL_HWREV_8101, "RTL8101" }, 219 { RL_HWREV_8101E, "RTL8101E" }, 220 { RL_HWREV_8102E, "RTL8102E" }, 221 { RL_HWREV_8106E, "RTL8106E" }, 222 { RL_HWREV_8401E, "RTL8401E" }, 223 { RL_HWREV_8402, "RTL8402" }, 224 { RL_HWREV_8411, "RTL8411" }, 225 { RL_HWREV_8411B, "RTL8411B" }, 226 { RL_HWREV_8102EL, "RTL8102EL" }, 227 { RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" }, 228 { RL_HWREV_8103E, "RTL8103E" }, 229 { RL_HWREV_8110S, "RTL8110S" }, 230 { RL_HWREV_8139CPLUS, "RTL8139C+" }, 231 { RL_HWREV_8168B_SPIN1, "RTL8168 1" }, 232 { RL_HWREV_8168B_SPIN2, "RTL8168 2" }, 233 { RL_HWREV_8168B_SPIN3, "RTL8168 3" }, 234 { RL_HWREV_8168C, "RTL8168C/8111C" }, 235 { RL_HWREV_8168C_SPIN2, "RTL8168C/8111C" }, 236 { RL_HWREV_8168CP, "RTL8168CP/8111CP" }, 237 { RL_HWREV_8168F, "RTL8168F/8111F" }, 238 { RL_HWREV_8168G, "RTL8168G/8111G" }, 239 { RL_HWREV_8168GU, "RTL8168GU/8111GU" }, 240 { RL_HWREV_8105E, "RTL8105E" }, 241 { RL_HWREV_8105E_SPIN1, "RTL8105E" }, 242 { 
RL_HWREV_8168D, "RTL8168D/8111D" }, 243 { RL_HWREV_8168DP, "RTL8168DP/8111DP" }, 244 { RL_HWREV_8168E, "RTL8168E/8111E" }, 245 { RL_HWREV_8168E_VL, "RTL8168E/8111E-VL" }, 246 { RL_HWREV_8168EP, "RTL8168EP/8111EP" }, 247 { RL_HWREV_8169, "RTL8169" }, 248 { RL_HWREV_8169_8110SB, "RTL8169/8110SB" }, 249 { RL_HWREV_8169_8110SBL, "RTL8169SBL" }, 250 { RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" }, 251 { RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" }, 252 { RL_HWREV_8169S, "RTL8169S" }, 253 254 { 0, NULL } 255 }; 256 257 258 static inline void 259 re_set_bufaddr(struct rl_desc *d, bus_addr_t addr) 260 { 261 d->rl_bufaddr_lo = htole32((uint32_t)addr); 262 if (sizeof(bus_addr_t) == sizeof(uint64_t)) 263 d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32); 264 else 265 d->rl_bufaddr_hi = 0; 266 } 267 268 /* 269 * Send a read command and address to the EEPROM, check for ACK. 270 */ 271 void 272 re_eeprom_putbyte(struct rl_softc *sc, int addr) 273 { 274 int d, i; 275 276 d = addr | (RL_9346_READ << sc->rl_eewidth); 277 278 /* 279 * Feed in each bit and strobe the clock. 280 */ 281 282 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { 283 if (d & i) 284 EE_SET(RL_EE_DATAIN); 285 else 286 EE_CLR(RL_EE_DATAIN); 287 DELAY(100); 288 EE_SET(RL_EE_CLK); 289 DELAY(150); 290 EE_CLR(RL_EE_CLK); 291 DELAY(100); 292 } 293 } 294 295 /* 296 * Read a word of data stored in the EEPROM at address 'addr.' 297 */ 298 void 299 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) 300 { 301 int i; 302 u_int16_t word = 0; 303 304 /* 305 * Send address of word we want to read. 306 */ 307 re_eeprom_putbyte(sc, addr); 308 309 /* 310 * Start reading bits from EEPROM. 311 */ 312 for (i = 0x8000; i; i >>= 1) { 313 EE_SET(RL_EE_CLK); 314 DELAY(100); 315 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 316 word |= i; 317 EE_CLR(RL_EE_CLK); 318 DELAY(100); 319 } 320 321 *dest = word; 322 } 323 324 /* 325 * Read a sequence of words from the EEPROM. 
326 */ 327 void 328 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) 329 { 330 int i; 331 u_int16_t word = 0, *ptr; 332 333 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 334 335 DELAY(100); 336 337 for (i = 0; i < cnt; i++) { 338 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 339 re_eeprom_getword(sc, off + i, &word); 340 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 341 ptr = (u_int16_t *)(dest + (i * 2)); 342 *ptr = word; 343 } 344 345 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 346 } 347 348 int 349 re_gmii_readreg(struct device *self, int phy, int reg) 350 { 351 struct rl_softc *sc = (struct rl_softc *)self; 352 u_int32_t rval; 353 int i; 354 355 if (phy != 7) 356 return (0); 357 358 /* Let the rgephy driver read the GMEDIASTAT register */ 359 360 if (reg == RL_GMEDIASTAT) { 361 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 362 return (rval); 363 } 364 365 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 366 367 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 368 rval = CSR_READ_4(sc, RL_PHYAR); 369 if (rval & RL_PHYAR_BUSY) 370 break; 371 DELAY(25); 372 } 373 374 if (i == RL_PHY_TIMEOUT) { 375 printf ("%s: PHY read failed\n", sc->sc_dev.dv_xname); 376 return (0); 377 } 378 379 DELAY(20); 380 381 return (rval & RL_PHYAR_PHYDATA); 382 } 383 384 void 385 re_gmii_writereg(struct device *dev, int phy, int reg, int data) 386 { 387 struct rl_softc *sc = (struct rl_softc *)dev; 388 u_int32_t rval; 389 int i; 390 391 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 392 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 393 394 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 395 rval = CSR_READ_4(sc, RL_PHYAR); 396 if (!(rval & RL_PHYAR_BUSY)) 397 break; 398 DELAY(25); 399 } 400 401 if (i == RL_PHY_TIMEOUT) 402 printf ("%s: PHY write failed\n", sc->sc_dev.dv_xname); 403 404 DELAY(20); 405 } 406 407 int 408 re_miibus_readreg(struct device *dev, int phy, int reg) 409 { 410 struct rl_softc *sc = (struct rl_softc *)dev; 411 u_int16_t rval = 0; 412 u_int16_t re8139_reg = 0; 413 int s; 414 415 s = splnet(); 416 417 if 
(sc->sc_hwrev != RL_HWREV_8139CPLUS) { 418 rval = re_gmii_readreg(dev, phy, reg); 419 splx(s); 420 return (rval); 421 } 422 423 /* Pretend the internal PHY is only at address 0 */ 424 if (phy) { 425 splx(s); 426 return (0); 427 } 428 switch(reg) { 429 case MII_BMCR: 430 re8139_reg = RL_BMCR; 431 break; 432 case MII_BMSR: 433 re8139_reg = RL_BMSR; 434 break; 435 case MII_ANAR: 436 re8139_reg = RL_ANAR; 437 break; 438 case MII_ANER: 439 re8139_reg = RL_ANER; 440 break; 441 case MII_ANLPAR: 442 re8139_reg = RL_LPAR; 443 break; 444 case MII_PHYIDR1: 445 case MII_PHYIDR2: 446 splx(s); 447 return (0); 448 /* 449 * Allow the rlphy driver to read the media status 450 * register. If we have a link partner which does not 451 * support NWAY, this is the register which will tell 452 * us the results of parallel detection. 453 */ 454 case RL_MEDIASTAT: 455 rval = CSR_READ_1(sc, RL_MEDIASTAT); 456 splx(s); 457 return (rval); 458 default: 459 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 460 splx(s); 461 return (0); 462 } 463 rval = CSR_READ_2(sc, re8139_reg); 464 if (re8139_reg == RL_BMCR) { 465 /* 8139C+ has different bit layout. */ 466 rval &= ~(BMCR_LOOP | BMCR_ISO); 467 } 468 splx(s); 469 return (rval); 470 } 471 472 void 473 re_miibus_writereg(struct device *dev, int phy, int reg, int data) 474 { 475 struct rl_softc *sc = (struct rl_softc *)dev; 476 u_int16_t re8139_reg = 0; 477 int s; 478 479 s = splnet(); 480 481 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 482 re_gmii_writereg(dev, phy, reg, data); 483 splx(s); 484 return; 485 } 486 487 /* Pretend the internal PHY is only at address 0 */ 488 if (phy) { 489 splx(s); 490 return; 491 } 492 switch(reg) { 493 case MII_BMCR: 494 re8139_reg = RL_BMCR; 495 /* 8139C+ has different bit layout. 
*/ 496 data &= ~(BMCR_LOOP | BMCR_ISO); 497 break; 498 case MII_BMSR: 499 re8139_reg = RL_BMSR; 500 break; 501 case MII_ANAR: 502 re8139_reg = RL_ANAR; 503 break; 504 case MII_ANER: 505 re8139_reg = RL_ANER; 506 break; 507 case MII_ANLPAR: 508 re8139_reg = RL_LPAR; 509 break; 510 case MII_PHYIDR1: 511 case MII_PHYIDR2: 512 splx(s); 513 return; 514 break; 515 default: 516 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 517 splx(s); 518 return; 519 } 520 CSR_WRITE_2(sc, re8139_reg, data); 521 splx(s); 522 } 523 524 void 525 re_miibus_statchg(struct device *dev) 526 { 527 struct rl_softc *sc = (struct rl_softc *)dev; 528 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 529 struct mii_data *mii = &sc->sc_mii; 530 531 if ((ifp->if_flags & IFF_RUNNING) == 0) 532 return; 533 534 sc->rl_flags &= ~RL_FLAG_LINK; 535 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 536 (IFM_ACTIVE | IFM_AVALID)) { 537 switch (IFM_SUBTYPE(mii->mii_media_active)) { 538 case IFM_10_T: 539 case IFM_100_TX: 540 sc->rl_flags |= RL_FLAG_LINK; 541 break; 542 case IFM_1000_T: 543 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) 544 break; 545 sc->rl_flags |= RL_FLAG_LINK; 546 break; 547 default: 548 break; 549 } 550 } 551 552 /* 553 * Realtek controllers do not provide an interface to 554 * Tx/Rx MACs for resolved speed, duplex and flow-control 555 * parameters. 556 */ 557 } 558 559 void 560 re_iff(struct rl_softc *sc) 561 { 562 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 563 int h = 0; 564 u_int32_t hashes[2]; 565 u_int32_t rxfilt; 566 struct arpcom *ac = &sc->sc_arpcom; 567 struct ether_multi *enm; 568 struct ether_multistep step; 569 570 rxfilt = CSR_READ_4(sc, RL_RXCFG); 571 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | 572 RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI); 573 ifp->if_flags &= ~IFF_ALLMULTI; 574 575 /* 576 * Always accept frames destined to our station address. 577 * Always accept broadcast frames. 
578 */ 579 rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; 580 581 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 582 ifp->if_flags |= IFF_ALLMULTI; 583 rxfilt |= RL_RXCFG_RX_MULTI; 584 if (ifp->if_flags & IFF_PROMISC) 585 rxfilt |= RL_RXCFG_RX_ALLPHYS; 586 hashes[0] = hashes[1] = 0xFFFFFFFF; 587 } else { 588 rxfilt |= RL_RXCFG_RX_MULTI; 589 /* Program new filter. */ 590 bzero(hashes, sizeof(hashes)); 591 592 ETHER_FIRST_MULTI(step, ac, enm); 593 while (enm != NULL) { 594 h = ether_crc32_be(enm->enm_addrlo, 595 ETHER_ADDR_LEN) >> 26; 596 597 if (h < 32) 598 hashes[0] |= (1 << h); 599 else 600 hashes[1] |= (1 << (h - 32)); 601 602 ETHER_NEXT_MULTI(step, enm); 603 } 604 } 605 606 /* 607 * For some unfathomable reason, Realtek decided to reverse 608 * the order of the multicast hash registers in the PCI Express 609 * parts. This means we have to write the hash pattern in reverse 610 * order for those devices. 611 */ 612 if (sc->rl_flags & RL_FLAG_PCIE) { 613 CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1])); 614 CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0])); 615 } else { 616 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 617 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 618 } 619 620 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 621 } 622 623 void 624 re_reset(struct rl_softc *sc) 625 { 626 int i; 627 628 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 629 630 for (i = 0; i < RL_TIMEOUT; i++) { 631 DELAY(10); 632 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 633 break; 634 } 635 if (i == RL_TIMEOUT) 636 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 637 638 if (sc->rl_flags & RL_FLAG_MACRESET) 639 CSR_WRITE_1(sc, RL_LDPS, 1); 640 } 641 642 #ifdef __armish__ 643 /* 644 * Thecus N2100 doesn't store the full mac address in eeprom 645 * so we read the old mac address from the device before the reset 646 * in hopes that the proper mac address is already there. 
647 */ 648 union { 649 u_int32_t eaddr_word[2]; 650 u_char eaddr[ETHER_ADDR_LEN]; 651 } boot_eaddr; 652 int boot_eaddr_valid; 653 #endif /* __armish__ */ 654 /* 655 * Attach the interface. Allocate softc structures, do ifmedia 656 * setup and ethernet/BPF attach. 657 */ 658 int 659 re_attach(struct rl_softc *sc, const char *intrstr) 660 { 661 u_char eaddr[ETHER_ADDR_LEN]; 662 u_int16_t as[ETHER_ADDR_LEN / 2]; 663 struct ifnet *ifp; 664 u_int16_t re_did = 0; 665 int error = 0, i; 666 const struct re_revision *rr; 667 const char *re_name = NULL; 668 669 sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 670 671 switch (sc->sc_hwrev) { 672 case RL_HWREV_8139CPLUS: 673 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; 674 sc->rl_max_mtu = RL_MTU; 675 break; 676 case RL_HWREV_8100E: 677 case RL_HWREV_8100E_SPIN2: 678 case RL_HWREV_8101E: 679 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; 680 sc->rl_max_mtu = RL_MTU; 681 break; 682 case RL_HWREV_8103E: 683 sc->rl_flags |= RL_FLAG_MACSLEEP; 684 /* FALLTHROUGH */ 685 case RL_HWREV_8102E: 686 case RL_HWREV_8102EL: 687 case RL_HWREV_8102EL_SPIN1: 688 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 689 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | 690 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 691 sc->rl_max_mtu = RL_MTU; 692 break; 693 case RL_HWREV_8401E: 694 case RL_HWREV_8105E: 695 case RL_HWREV_8105E_SPIN1: 696 case RL_HWREV_8106E: 697 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 698 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 699 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 700 sc->rl_max_mtu = RL_MTU; 701 break; 702 case RL_HWREV_8402: 703 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 704 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 705 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | 706 RL_FLAG_CMDSTOP_WAIT_TXQ; 707 sc->rl_max_mtu = RL_MTU; 708 break; 709 case RL_HWREV_8168B_SPIN1: 710 case RL_HWREV_8168B_SPIN2: 711 sc->rl_flags |= RL_FLAG_WOLRXENB; 712 /* 
FALLTHROUGH */ 713 case RL_HWREV_8168B_SPIN3: 714 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; 715 sc->rl_max_mtu = RL_MTU; 716 break; 717 case RL_HWREV_8168C_SPIN2: 718 sc->rl_flags |= RL_FLAG_MACSLEEP; 719 /* FALLTHROUGH */ 720 case RL_HWREV_8168C: 721 case RL_HWREV_8168CP: 722 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 723 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 724 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; 725 sc->rl_max_mtu = RL_JUMBO_MTU_6K; 726 break; 727 case RL_HWREV_8168D: 728 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 729 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 730 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 731 RL_FLAG_WOL_MANLINK; 732 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 733 break; 734 case RL_HWREV_8168DP: 735 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 736 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD | 737 RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK; 738 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 739 break; 740 case RL_HWREV_8168E: 741 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 742 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 743 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 744 RL_FLAG_WOL_MANLINK; 745 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 746 break; 747 case RL_HWREV_8168E_VL: 748 sc->rl_flags |= RL_FLAG_EARLYOFF | RL_FLAG_PHYWAKE | RL_FLAG_PAR | 749 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 750 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | 751 RL_FLAG_WOL_MANLINK; 752 sc->rl_max_mtu = RL_JUMBO_MTU_6K; 753 break; 754 case RL_HWREV_8168F: 755 sc->rl_flags |= RL_FLAG_EARLYOFF; 756 /* FALLTHROUGH */ 757 case RL_HWREV_8411: 758 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 759 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 760 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | 761 RL_FLAG_WOL_MANLINK; 762 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 763 break; 764 case RL_HWREV_8168EP: 765 case RL_HWREV_8168G: 766 
case RL_HWREV_8411B: 767 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 768 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 769 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | 770 RL_FLAG_WOL_MANLINK | RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 771 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 772 break; 773 case RL_HWREV_8168GU: 774 if (sc->sc_product == PCI_PRODUCT_REALTEK_RT8101E) { 775 /* RTL8106EUS */ 776 sc->rl_flags |= RL_FLAG_FASTETHER; 777 sc->rl_max_mtu = RL_MTU; 778 } else { 779 sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; 780 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 781 } 782 783 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 784 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 785 RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ | 786 RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 787 break; 788 case RL_HWREV_8169_8110SB: 789 case RL_HWREV_8169_8110SBL: 790 case RL_HWREV_8169_8110SCd: 791 case RL_HWREV_8169_8110SCe: 792 sc->rl_flags |= RL_FLAG_PHYWAKE; 793 /* FALLTHROUGH */ 794 case RL_HWREV_8169: 795 case RL_HWREV_8169S: 796 case RL_HWREV_8110S: 797 sc->rl_flags |= RL_FLAG_MACRESET; 798 sc->rl_max_mtu = RL_JUMBO_MTU_7K; 799 break; 800 default: 801 break; 802 } 803 804 /* Reset the adapter. 
*/ 805 re_reset(sc); 806 807 sc->rl_tx_time = 5; /* 125us */ 808 sc->rl_rx_time = 2; /* 50us */ 809 if (sc->rl_flags & RL_FLAG_PCIE) 810 sc->rl_sim_time = 75; /* 75us */ 811 else 812 sc->rl_sim_time = 125; /* 125us */ 813 sc->rl_imtype = RL_IMTYPE_SIM; /* simulated interrupt moderation */ 814 815 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 816 sc->rl_bus_speed = 33; /* XXX */ 817 else if (sc->rl_flags & RL_FLAG_PCIE) 818 sc->rl_bus_speed = 125; 819 else { 820 u_int8_t cfg2; 821 822 cfg2 = CSR_READ_1(sc, RL_CFG2); 823 switch (cfg2 & RL_CFG2_PCI_MASK) { 824 case RL_CFG2_PCI_33MHZ: 825 sc->rl_bus_speed = 33; 826 break; 827 case RL_CFG2_PCI_66MHZ: 828 sc->rl_bus_speed = 66; 829 break; 830 default: 831 printf("%s: unknown bus speed, assume 33MHz\n", 832 sc->sc_dev.dv_xname); 833 sc->rl_bus_speed = 33; 834 break; 835 } 836 837 if (cfg2 & RL_CFG2_PCI_64BIT) 838 sc->rl_flags |= RL_FLAG_PCI64; 839 } 840 841 re_config_imtype(sc, sc->rl_imtype); 842 843 if (sc->rl_flags & RL_FLAG_PAR) { 844 /* 845 * XXX Should have a better way to extract station 846 * address from EEPROM. 847 */ 848 for (i = 0; i < ETHER_ADDR_LEN; i++) 849 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 850 } else { 851 sc->rl_eewidth = RL_9356_ADDR_LEN; 852 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 853 if (re_did != 0x8129) 854 sc->rl_eewidth = RL_9346_ADDR_LEN; 855 856 /* 857 * Get station address from the EEPROM. 858 */ 859 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 860 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 861 as[i] = letoh16(as[i]); 862 bcopy(as, eaddr, sizeof(eaddr)); 863 864 #ifdef __armish__ 865 /* 866 * On the Thecus N2100, the MAC address in the EEPROM is 867 * always 00:14:fd:10:00:00. The proper MAC address is 868 * stored in flash. Fortunately RedBoot configures the 869 * proper MAC address (for the first onboard interface) 870 * which we can read from the IDR. 
871 */ 872 if (eaddr[0] == 0x00 && eaddr[1] == 0x14 && 873 eaddr[2] == 0xfd && eaddr[3] == 0x10 && 874 eaddr[4] == 0x00 && eaddr[5] == 0x00) { 875 if (boot_eaddr_valid == 0) { 876 boot_eaddr.eaddr_word[1] = 877 letoh32(CSR_READ_4(sc, RL_IDR4)); 878 boot_eaddr.eaddr_word[0] = 879 letoh32(CSR_READ_4(sc, RL_IDR0)); 880 boot_eaddr_valid = 1; 881 } 882 883 bcopy(boot_eaddr.eaddr, eaddr, sizeof(eaddr)); 884 eaddr[5] += sc->sc_dev.dv_unit; 885 } 886 #endif 887 } 888 889 /* 890 * Set RX length mask, TX poll request register 891 * and TX descriptor count. 892 */ 893 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) { 894 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 895 sc->rl_txstart = RL_TXSTART; 896 sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8139; 897 } else { 898 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 899 sc->rl_txstart = RL_GTXSTART; 900 sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8169; 901 } 902 903 bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 904 905 for (rr = re_revisions; rr->re_name != NULL; rr++) { 906 if (rr->re_chipid == sc->sc_hwrev) 907 re_name = rr->re_name; 908 } 909 910 if (re_name == NULL) 911 printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16); 912 else 913 printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16); 914 915 printf(", %s, address %s\n", intrstr, 916 ether_sprintf(sc->sc_arpcom.ac_enaddr)); 917 918 if (sc->rl_ldata.rl_tx_desc_cnt > 919 PAGE_SIZE / sizeof(struct rl_desc)) { 920 sc->rl_ldata.rl_tx_desc_cnt = 921 PAGE_SIZE / sizeof(struct rl_desc); 922 } 923 924 /* Allocate DMA'able memory for the TX ring */ 925 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc), 926 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1, 927 &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT | 928 BUS_DMA_ZERO)) != 0) { 929 printf("%s: can't allocate tx listseg, error = %d\n", 930 sc->sc_dev.dv_xname, error); 931 goto fail_0; 932 } 933 934 /* Load the map for the TX ring. 
*/ 935 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg, 936 sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc), 937 (caddr_t *)&sc->rl_ldata.rl_tx_list, 938 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 939 printf("%s: can't map tx list, error = %d\n", 940 sc->sc_dev.dv_xname, error); 941 goto fail_1; 942 } 943 944 if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1, 945 RL_TX_LIST_SZ(sc), 0, 0, 946 &sc->rl_ldata.rl_tx_list_map)) != 0) { 947 printf("%s: can't create tx list map, error = %d\n", 948 sc->sc_dev.dv_xname, error); 949 goto fail_2; 950 } 951 952 if ((error = bus_dmamap_load(sc->sc_dmat, 953 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 954 RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { 955 printf("%s: can't load tx list, error = %d\n", 956 sc->sc_dev.dv_xname, error); 957 goto fail_3; 958 } 959 960 /* Create DMA maps for TX buffers */ 961 for (i = 0; i < RL_TX_QLEN; i++) { 962 error = bus_dmamap_create(sc->sc_dmat, 963 RL_JUMBO_FRAMELEN, RL_NTXSEGS, RL_JUMBO_FRAMELEN, 964 0, 0, &sc->rl_ldata.rl_txq[i].txq_dmamap); 965 if (error) { 966 printf("%s: can't create DMA map for TX\n", 967 sc->sc_dev.dv_xname); 968 goto fail_4; 969 } 970 } 971 972 /* Allocate DMA'able memory for the RX ring */ 973 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ, 974 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1, 975 &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT | 976 BUS_DMA_ZERO)) != 0) { 977 printf("%s: can't allocate rx listnseg, error = %d\n", 978 sc->sc_dev.dv_xname, error); 979 goto fail_4; 980 } 981 982 /* Load the map for the RX ring. 
*/ 983 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg, 984 sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ, 985 (caddr_t *)&sc->rl_ldata.rl_rx_list, 986 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 987 printf("%s: can't map rx list, error = %d\n", 988 sc->sc_dev.dv_xname, error); 989 goto fail_5; 990 991 } 992 993 if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ, 1, 994 RL_RX_DMAMEM_SZ, 0, 0, 995 &sc->rl_ldata.rl_rx_list_map)) != 0) { 996 printf("%s: can't create rx list map, error = %d\n", 997 sc->sc_dev.dv_xname, error); 998 goto fail_6; 999 } 1000 1001 if ((error = bus_dmamap_load(sc->sc_dmat, 1002 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1003 RL_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) { 1004 printf("%s: can't load rx list, error = %d\n", 1005 sc->sc_dev.dv_xname, error); 1006 goto fail_7; 1007 } 1008 1009 /* Create DMA maps for RX buffers */ 1010 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1011 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 1012 0, 0, &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 1013 if (error) { 1014 printf("%s: can't create DMA map for RX\n", 1015 sc->sc_dev.dv_xname); 1016 goto fail_8; 1017 } 1018 } 1019 1020 ifp = &sc->sc_arpcom.ac_if; 1021 ifp->if_softc = sc; 1022 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 1023 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1024 ifp->if_ioctl = re_ioctl; 1025 ifp->if_start = re_start; 1026 ifp->if_watchdog = re_watchdog; 1027 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0) 1028 ifp->if_hardmtu = sc->rl_max_mtu; 1029 IFQ_SET_MAXLEN(&ifp->if_snd, RL_TX_QLEN); 1030 IFQ_SET_READY(&ifp->if_snd); 1031 1032 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 | 1033 IFCAP_CSUM_UDPv4; 1034 1035 /* 1036 * RTL8168/8111C generates wrong IP checksummed frame if the 1037 * packet has IP options so disable TX IP checksum offloading. 
1038 */ 1039 switch (sc->sc_hwrev) { 1040 case RL_HWREV_8168C: 1041 case RL_HWREV_8168C_SPIN2: 1042 case RL_HWREV_8168CP: 1043 break; 1044 default: 1045 ifp->if_capabilities |= IFCAP_CSUM_IPv4; 1046 } 1047 1048 #if NVLAN > 0 1049 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1050 #endif 1051 1052 #ifndef SMALL_KERNEL 1053 ifp->if_capabilities |= IFCAP_WOL; 1054 ifp->if_wol = re_wol; 1055 re_wol(ifp, 0); 1056 #endif 1057 timeout_set(&sc->timer_handle, re_tick, sc); 1058 1059 /* Take PHY out of power down mode. */ 1060 if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) { 1061 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); 1062 if (sc->sc_hwrev == RL_HWREV_8401E) 1063 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); 1064 } 1065 if (sc->rl_flags & RL_FLAG_PHYWAKE) { 1066 re_gmii_writereg((struct device *)sc, 1, 0x1f, 0); 1067 re_gmii_writereg((struct device *)sc, 1, 0x0e, 0); 1068 } 1069 1070 /* Do MII setup */ 1071 sc->sc_mii.mii_ifp = ifp; 1072 sc->sc_mii.mii_readreg = re_miibus_readreg; 1073 sc->sc_mii.mii_writereg = re_miibus_writereg; 1074 sc->sc_mii.mii_statchg = re_miibus_statchg; 1075 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd, 1076 re_ifmedia_sts); 1077 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 1078 MII_OFFSET_ANY, MIIF_DOPAUSE); 1079 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 1080 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 1081 ifmedia_add(&sc->sc_mii.mii_media, 1082 IFM_ETHER|IFM_NONE, 0, NULL); 1083 ifmedia_set(&sc->sc_mii.mii_media, 1084 IFM_ETHER|IFM_NONE); 1085 } else 1086 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 1087 1088 /* 1089 * Call MI attach routine. 1090 */ 1091 if_attach(ifp); 1092 ether_ifattach(ifp); 1093 1094 return (0); 1095 1096 fail_8: 1097 /* Destroy DMA maps for RX buffers. 
 */
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ);
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}

/*
 * Allocate one cluster mbuf, load it into the DMA map of the RX slot
 * at rl_rx_prodidx and hand the corresponding descriptor to the chip.
 * Returns 0 on success or ENOBUFS if no mbuf/DMA resources are
 * available or the descriptor is still owned by the hardware.
 */
int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf	*m;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error, idx;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
	 */
	m->m_len = m->m_pkthdr.len = RE_RX_DESC_BUFLEN;
	m->m_data += RE_ETHER_ALIGN;

	idx = sc->rl_ldata.rl_rx_prodidx;
	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/*
	 * Sanity check: the descriptor we are about to fill must not
	 * still be owned by the chip, otherwise a buffer is being
	 * posted to a slot the hardware may DMA into right now.
	 */
	d = &sc->rl_ldata.rl_rx_list[idx];
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == (RL_RX_DESC_CNT - 1))
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	/*
	 * Write the descriptor twice: first fully initialized but
	 * without OWN, sync, then set OWN and sync again, so the chip
	 * never sees OWN on a half-written descriptor.
	 */
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);

	return (0);
}

/*
 * Zero the TX descriptor ring, clear the per-queue mbuf pointers and
 * reset the TX producer/consumer bookkeeping.  Always returns 0.
 */
int
re_tx_list_init(struct rl_softc *sc)
{
	int i;

	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
	for (i = 0; i < RL_TX_QLEN; i++) {
		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, 0,
	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_txq_prodidx = 0;
	sc->rl_ldata.rl_txq_considx = 0;
	sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT(sc);
	sc->rl_ldata.rl_tx_nextfree = 0;

	return (0);
}

/*
 * Zero the RX descriptor ring, reset the RX bookkeeping and jumbo
 * reassembly state, then fill the ring with fresh mbufs.
 * Always returns 0.
 */
int
re_rx_list_init(struct rl_softc *sc)
{
	bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_ldata.rl_rx_considx = 0;
	sc->rl_head = sc->rl_tail = NULL;

	if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2, RL_RX_DESC_CNT);
	re_rx_list_fill(sc);

	return (0);
}

/*
 * Post as many RX buffers as the rxr accounting allows, stopping
 * early if mbuf allocation fails; unused slots are returned to the
 * ring accounting.
 */
void
re_rx_list_fill(struct rl_softc *sc)
{
	u_int slots;

	for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring, RL_RX_DESC_CNT);
	    slots > 0; slots--) {
		if (re_newbuf(sc) == ENOBUFS)
			break;
	}
	if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots);
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 *
 * Returns non-zero if at least one descriptor was consumed.
 */
int
re_rxeof(struct rl_softc *sc)
{
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		i, total_len, rx = 0;
	struct rl_desc	*cur_rx;
	struct rl_rxsoft *rxs;
	u_int32_t	rxstat, rxvlan;

	ifp = &sc->sc_arpcom.ac_if;

	for (i = sc->rl_ldata.rl_rx_considx;
	    if_rxr_inuse(&sc->rl_ldata.rl_rx_ring) > 0;
	    i = RL_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		RL_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(cur_rx->rl_cmdstat);
		rxvlan = letoh32(cur_rx->rl_vlanctl);
		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		/* Chip still owns this descriptor: nothing more to do. */
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxs = &sc->rl_ldata.rl_rxsoft[i];
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;
		if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		/*
		 * No EOF: this buffer is one fragment of a jumbo frame;
		 * chain it onto rl_head/rl_tail and keep collecting.
		 */
		if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RE_RX_DESC_BUFLEN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, Realtek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			continue;
		}

		if (sc->rl_head != NULL) {
			/* Last fragment: length is the remainder. */
			m->m_len = total_len % RE_RX_DESC_BUFLEN;
			if (m->m_len == 0)
				m->m_len = RE_RX_DESC_BUFLEN;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming */

		if (sc->rl_flags & RL_FLAG_DESCV2) {
			/* Check IP header checksum */
			if ((rxvlan & RL_RDESC_IPV4) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
			    (((rxstat & RL_RDESC_STAT_TCP) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    ((rxstat & RL_RDESC_STAT_UDP) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		} else {
			/* Check IP header checksum */
			if ((rxstat & RL_RDESC_STAT_PROTOID) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}
#if NVLAN > 0
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	sc->rl_ldata.rl_rx_considx = i;
	re_rx_list_fill(sc);

	return (rx);
}

/*
 * Reclaim completed TX queue entries: free their mbufs and DMA maps,
 * update the collision/error/packet counters and restart the
 * transmitter if descriptors are still outstanding.
 * Returns non-zero if at least one entry was reclaimed.
 */
int
re_txeof(struct rl_softc *sc)
{
	struct ifnet	*ifp;
	struct rl_txq	*txq;
	uint32_t	txstat;
	int		idx, descidx, tx = 0;

	ifp = &sc->sc_arpcom.ac_if;

	for (idx = sc->rl_ldata.rl_txq_considx;; idx = RL_NEXT_TXQ(sc, idx)) {
		txq = &sc->rl_ldata.rl_txq[idx];

		if (txq->txq_mbuf == NULL) {
			KASSERT(idx == sc->rl_ldata.rl_txq_prodidx);
			break;
		}

		/* Only the last (EOF) descriptor of the packet is checked. */
		descidx = txq->txq_descidx;
		RL_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat);
		RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RL_TDESC_CMD_EOF) != 0);
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		tx = 1;
		sc->rl_ldata.rl_tx_free += txq->txq_nsegs;
		KASSERT(sc->rl_ldata.rl_tx_free <= RL_TX_DESC_CNT(sc));
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RL_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->rl_ldata.rl_txq_considx = idx;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only
	 * seems to be required with the PCIe devices.
	 */
	if (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT(sc))
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	else
		ifp->if_timer = 0;

	return (tx);
}

/*
 * One-second timer: drive the MII state machine and re-poll the link
 * state while no link is established, then reschedule ourselves.
 */
void
re_tick(void *xsc)
{
	struct rl_softc	*sc = xsc;
	struct mii_data	*mii;
	struct ifnet	*ifp;
	int s;

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	s = splnet();

	mii_tick(mii);

	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		re_miibus_statchg(&sc->sc_dev);

	splx(s);

	timeout_add_sec(&sc->timer_handle, 1);
}

/*
 * Interrupt handler.  Acknowledges and dispatches RX/TX/error/link
 * events and manages the switch between plain and simulated
 * (timer-based) interrupt moderation.  Returns non-zero if the
 * interrupt was ours.
 */
int
re_intr(void *arg)
{
	struct rl_softc	*sc = arg;
	struct ifnet	*ifp;
	u_int16_t	status;
	int		claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);

	rx = tx = 0;
	status = CSR_READ_2(sc, RL_ISR);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xffff)
		return (0);
	/* ISR is write-1-to-clear: ack everything we just read. */
	if (status)
		CSR_WRITE_2(sc, RL_ISR, status);

	if (status & RL_ISR_TIMEOUT_EXPIRED)
		claimed = 1;

	if (status & RL_INTRS_CPLUS) {
		if (status & (sc->rl_rx_ack | RL_ISR_RX_ERR)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		if (status & RL_ISR_SYSTEM_ERR) {
			re_init(ifp);
			claimed = 1;
		}

		if (status & RL_ISR_LINKCHG) {
			timeout_del(&sc->timer_handle);
			re_tick(sc);
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if ((sc->rl_flags & RL_FLAG_TIMERINTR)) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed, fallback
				 * to use TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				tx = re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	if (tx && !IFQ_IS_EMPTY(&ifp->if_snd))
		re_start(ifp);

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);

	return (claimed);
}

/*
 * Map one outgoing mbuf chain into TX descriptors starting at
 * rl_tx_nextfree.  On success the TX queue slot *idx is filled in and
 * *idx is advanced; ownership of the first descriptor is transferred
 * to the chip last so it never sees a partial packet.
 * Returns 0, EFBIG if the ring is too full, or another errno from the
 * DMA load.
 */
int
re_encap(struct rl_softc *sc, struct mbuf *m, int *idx)
{
	bus_dmamap_t	map;
	int		error, seg, nsegs, uidx, startidx, curidx, lastidx, pad;
	struct rl_desc	*d;
	u_int32_t	cmdstat, vlanctl = 0, csum_flags = 0;
	struct rl_txq	*txq;

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */

	/*
	 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
	 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/
	 * RL_TDESC_CMD_UDPCSUM does not take affect.
	 */

	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		if (sc->rl_flags & RL_FLAG_DESCV2) {
			vlanctl |= RL_TDESC_CMD_IPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
		} else {
			csum_flags |= RL_TDESC_CMD_IPCSUM;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_TCPCSUM;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_UDPCSUM;
		}
	}

	txq = &sc->rl_ldata.rl_txq[*idx];
	map = txq->txq_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX try to defrag if EFBIG? */
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	nsegs = map->dm_nsegs;
	pad = 0;
	/*
	 * Short frames with IP checksum offload on the old descriptor
	 * format need an extra pad descriptor (hardware quirk).
	 */
	if ((sc->rl_flags & RL_FLAG_DESCV2) == 0 &&
	    m->m_pkthdr.len <= RL_IP4CSUMTX_PADLEN &&
	    (csum_flags & RL_TDESC_CMD_IPCSUM) != 0) {
		pad = 1;
		nsegs++;
	}

	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
		error = EFBIG;
		goto fail_unload;
	}

	/*
	 * Make sure that the caches are synchronized before we
	 * ask the chip to start DMA for the packet data.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;
#endif

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it start transmission or
	 * reception.)
	 */
	curidx = startidx = sc->rl_ldata.rl_tx_nextfree;
	lastidx = -1;
	for (seg = 0; seg < map->dm_nsegs;
	    seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) {
		d = &sc->rl_ldata.rl_tx_list[curidx];
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cmdstat = letoh32(d->rl_cmdstat);
		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD);
		if (cmdstat & RL_TDESC_STAT_OWN) {
			printf("%s: tried to map busy TX descriptor\n",
			    sc->sc_dev.dv_xname);
			/* Roll back any descriptors already written. */
			for (; seg > 0; seg --) {
				uidx = (curidx + RL_TX_DESC_CNT(sc) - seg) %
				    RL_TX_DESC_CNT(sc);
				sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0;
				RL_TXDESCSYNC(sc, uidx,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			}
			error = ENOBUFS;
			goto fail_unload;
		}

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
		cmdstat = csum_flags | map->dm_segs[seg].ds_len;
		if (seg == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		if (seg == nsegs - 1) {
			cmdstat |= RL_TDESC_CMD_EOF;
			lastidx = curidx;
		}
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	if (pad) {
		/* Extra pad descriptor pointing at the pad buffer. */
		d = &sc->rl_ldata.rl_tx_list[curidx];
		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, RL_TXPADDADDR(sc));
		cmdstat = csum_flags |
		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		lastidx = curidx;
		curidx = RL_NEXT_TX_DESC(sc, curidx);
	}
	KASSERT(lastidx != -1);

	/* Transfer ownership of packet to the chip. */

	sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* update info of TX queue and descriptors */
	txq->txq_mbuf = m;
	txq->txq_descidx = lastidx;
	txq->txq_nsegs = nsegs;

	sc->rl_ldata.rl_tx_free -= nsegs;
	sc->rl_ldata.rl_tx_nextfree = curidx;

	*idx = RL_NEXT_TXQ(sc, *idx);

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmat, map);

	return (error);
}

/*
 * Main transmit routine for C+ and gigE NICs.
1773 */ 1774 1775 void 1776 re_start(struct ifnet *ifp) 1777 { 1778 struct rl_softc *sc; 1779 int idx, queued = 0; 1780 1781 sc = ifp->if_softc; 1782 1783 if (ifp->if_flags & IFF_OACTIVE) 1784 return; 1785 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 1786 return; 1787 1788 idx = sc->rl_ldata.rl_txq_prodidx; 1789 for (;;) { 1790 struct mbuf *m; 1791 int error; 1792 1793 IFQ_POLL(&ifp->if_snd, m); 1794 if (m == NULL) 1795 break; 1796 1797 if (sc->rl_ldata.rl_txq[idx].txq_mbuf != NULL) { 1798 KASSERT(idx == sc->rl_ldata.rl_txq_considx); 1799 ifp->if_flags |= IFF_OACTIVE; 1800 break; 1801 } 1802 1803 error = re_encap(sc, m, &idx); 1804 if (error == EFBIG && 1805 sc->rl_ldata.rl_tx_free == RL_TX_DESC_CNT(sc)) { 1806 IFQ_DEQUEUE(&ifp->if_snd, m); 1807 m_freem(m); 1808 ifp->if_oerrors++; 1809 continue; 1810 } 1811 if (error) { 1812 ifp->if_flags |= IFF_OACTIVE; 1813 break; 1814 } 1815 1816 IFQ_DEQUEUE(&ifp->if_snd, m); 1817 queued++; 1818 1819 #if NBPFILTER > 0 1820 /* 1821 * If there's a BPF listener, bounce a copy of this frame 1822 * to him. 1823 */ 1824 if (ifp->if_bpf) 1825 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1826 #endif 1827 } 1828 1829 if (queued == 0) 1830 return; 1831 1832 sc->rl_ldata.rl_txq_prodidx = idx; 1833 1834 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 1835 1836 /* 1837 * Set a timeout in case the chip goes out to lunch. 1838 */ 1839 ifp->if_timer = 5; 1840 } 1841 1842 int 1843 re_init(struct ifnet *ifp) 1844 { 1845 struct rl_softc *sc = ifp->if_softc; 1846 u_int16_t cfg; 1847 uint32_t rxcfg; 1848 int s; 1849 union { 1850 u_int32_t align_dummy; 1851 u_char eaddr[ETHER_ADDR_LEN]; 1852 } eaddr; 1853 1854 s = splnet(); 1855 1856 /* 1857 * Cancel pending I/O and free all RX/TX buffers. 1858 */ 1859 re_stop(ifp); 1860 1861 /* Put controller into known state. */ 1862 re_reset(sc); 1863 1864 /* 1865 * Enable C+ RX and TX mode, as well as VLAN stripping and 1866 * RX checksum offload. We must configure the C+ register 1867 * before all others. 
1868 */ 1869 cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW | 1870 RL_CPLUSCMD_RXCSUM_ENB; 1871 1872 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1873 cfg |= RL_CPLUSCMD_VLANSTRIP; 1874 1875 if (sc->rl_flags & RL_FLAG_MACSTAT) 1876 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 1877 else 1878 cfg |= RL_CPLUSCMD_RXENB; 1879 1880 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 1881 1882 /* 1883 * Init our MAC address. Even though the chipset 1884 * documentation doesn't mention it, we need to enter "Config 1885 * register write enable" mode to modify the ID registers. 1886 */ 1887 bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN); 1888 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 1889 CSR_WRITE_4(sc, RL_IDR4, 1890 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 1891 CSR_WRITE_4(sc, RL_IDR0, 1892 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 1893 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1894 1895 /* 1896 * For C+ mode, initialize the RX descriptors and mbufs. 1897 */ 1898 re_rx_list_init(sc); 1899 re_tx_list_init(sc); 1900 1901 /* 1902 * Load the addresses of the RX and TX lists into the chip. 1903 */ 1904 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 1905 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1906 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 1907 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1908 1909 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 1910 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1911 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 1912 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1913 1914 if (sc->rl_flags & RL_FLAG_RXDV_GATED) 1915 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & 1916 ~0x00080000); 1917 1918 /* 1919 * Enable transmit and receive. 1920 */ 1921 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 1922 1923 /* 1924 * Set the initial TX and RX configuration. 
1925 */ 1926 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 1927 1928 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 1929 1930 rxcfg = RL_RXCFG_CONFIG; 1931 if (sc->rl_flags & RL_FLAG_EARLYOFF) 1932 rxcfg |= RL_RXCFG_EARLYOFF; 1933 else if (sc->rl_flags & RL_FLAG_EARLYOFFV2) 1934 rxcfg |= RL_RXCFG_EARLYOFFV2; 1935 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 1936 1937 /* Program promiscuous mode and multicast filters. */ 1938 re_iff(sc); 1939 1940 /* 1941 * Enable interrupts. 1942 */ 1943 re_setup_intr(sc, 1, sc->rl_imtype); 1944 CSR_WRITE_2(sc, RL_ISR, sc->rl_imtype); 1945 1946 /* Start RX/TX process. */ 1947 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 1948 #ifdef notdef 1949 /* Enable receiver and transmitter. */ 1950 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 1951 #endif 1952 1953 /* 1954 * For 8169 gigE NICs, set the max allowed RX packet 1955 * size so we can receive jumbo frames. 1956 */ 1957 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 1958 if (sc->rl_flags & RL_FLAG_PCIE) 1959 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); 1960 else 1961 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 1962 } 1963 1964 mii_mediachg(&sc->sc_mii); 1965 1966 CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD); 1967 1968 ifp->if_flags |= IFF_RUNNING; 1969 ifp->if_flags &= ~IFF_OACTIVE; 1970 1971 splx(s); 1972 1973 sc->rl_flags &= ~RL_FLAG_LINK; 1974 1975 timeout_add_sec(&sc->timer_handle, 1); 1976 1977 return (0); 1978 } 1979 1980 /* 1981 * Set media options. 1982 */ 1983 int 1984 re_ifmedia_upd(struct ifnet *ifp) 1985 { 1986 struct rl_softc *sc; 1987 1988 sc = ifp->if_softc; 1989 1990 return (mii_mediachg(&sc->sc_mii)); 1991 } 1992 1993 /* 1994 * Report current media status. 
1995 */ 1996 void 1997 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1998 { 1999 struct rl_softc *sc; 2000 2001 sc = ifp->if_softc; 2002 2003 mii_pollstat(&sc->sc_mii); 2004 ifmr->ifm_active = sc->sc_mii.mii_media_active; 2005 ifmr->ifm_status = sc->sc_mii.mii_media_status; 2006 } 2007 2008 int 2009 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2010 { 2011 struct rl_softc *sc = ifp->if_softc; 2012 struct ifreq *ifr = (struct ifreq *) data; 2013 struct ifaddr *ifa = (struct ifaddr *)data; 2014 int s, error = 0; 2015 2016 s = splnet(); 2017 2018 switch(command) { 2019 case SIOCSIFADDR: 2020 ifp->if_flags |= IFF_UP; 2021 if (!(ifp->if_flags & IFF_RUNNING)) 2022 re_init(ifp); 2023 #ifdef INET 2024 if (ifa->ifa_addr->sa_family == AF_INET) 2025 arp_ifinit(&sc->sc_arpcom, ifa); 2026 #endif /* INET */ 2027 break; 2028 case SIOCSIFFLAGS: 2029 if (ifp->if_flags & IFF_UP) { 2030 if (ifp->if_flags & IFF_RUNNING) 2031 error = ENETRESET; 2032 else 2033 re_init(ifp); 2034 } else { 2035 if (ifp->if_flags & IFF_RUNNING) 2036 re_stop(ifp); 2037 } 2038 break; 2039 case SIOCGIFMEDIA: 2040 case SIOCSIFMEDIA: 2041 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 2042 break; 2043 case SIOCGIFRXR: 2044 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data, 2045 NULL, MCLBYTES, &sc->rl_ldata.rl_rx_ring); 2046 break; 2047 default: 2048 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data); 2049 } 2050 2051 if (error == ENETRESET) { 2052 if (ifp->if_flags & IFF_RUNNING) 2053 re_iff(sc); 2054 error = 0; 2055 } 2056 2057 splx(s); 2058 return (error); 2059 } 2060 2061 void 2062 re_watchdog(struct ifnet *ifp) 2063 { 2064 struct rl_softc *sc; 2065 int s; 2066 2067 sc = ifp->if_softc; 2068 s = splnet(); 2069 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 2070 ifp->if_oerrors++; 2071 2072 re_txeof(sc); 2073 re_rxeof(sc); 2074 2075 re_init(ifp); 2076 2077 splx(s); 2078 } 2079 2080 /* 2081 * Stop the adapter and free any mbufs allocated to the 2082 
* RX and TX lists. 2083 */ 2084 void 2085 re_stop(struct ifnet *ifp) 2086 { 2087 struct rl_softc *sc; 2088 int i; 2089 2090 sc = ifp->if_softc; 2091 2092 ifp->if_timer = 0; 2093 sc->rl_flags &= ~(RL_FLAG_LINK|RL_FLAG_TIMERINTR); 2094 2095 timeout_del(&sc->timer_handle); 2096 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2097 2098 mii_down(&sc->sc_mii); 2099 2100 /* 2101 * Disable accepting frames to put RX MAC into idle state. 2102 * Otherwise it's possible to get frames while stop command 2103 * execution is in progress and controller can DMA the frame 2104 * to already freed RX buffer during that period. 2105 */ 2106 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & 2107 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_INDIV | 2108 RL_RXCFG_RX_MULTI)); 2109 2110 if (sc->rl_flags & RL_FLAG_WAIT_TXPOLL) { 2111 for (i = RL_TIMEOUT; i > 0; i--) { 2112 if ((CSR_READ_1(sc, sc->rl_txstart) & 2113 RL_TXSTART_START) == 0) 2114 break; 2115 DELAY(20); 2116 } 2117 if (i == 0) 2118 printf("%s: stopping TX poll timed out!\n", 2119 sc->sc_dev.dv_xname); 2120 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2121 } else if (sc->rl_flags & RL_FLAG_CMDSTOP) { 2122 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | 2123 RL_CMD_RX_ENB); 2124 if (sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) { 2125 for (i = RL_TIMEOUT; i > 0; i--) { 2126 if ((CSR_READ_4(sc, RL_TXCFG) & 2127 RL_TXCFG_QUEUE_EMPTY) != 0) 2128 break; 2129 DELAY(100); 2130 } 2131 if (i == 0) 2132 printf("%s: stopping TXQ timed out!\n", 2133 sc->sc_dev.dv_xname); 2134 } 2135 } else 2136 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2137 DELAY(1000); 2138 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2139 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 2140 2141 if (sc->rl_head != NULL) { 2142 m_freem(sc->rl_head); 2143 sc->rl_head = sc->rl_tail = NULL; 2144 } 2145 2146 /* Free the TX list buffers. 
*/ 2147 for (i = 0; i < RL_TX_QLEN; i++) { 2148 if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) { 2149 bus_dmamap_unload(sc->sc_dmat, 2150 sc->rl_ldata.rl_txq[i].txq_dmamap); 2151 m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf); 2152 sc->rl_ldata.rl_txq[i].txq_mbuf = NULL; 2153 } 2154 } 2155 2156 /* Free the RX list buffers. */ 2157 for (i = 0; i < RL_RX_DESC_CNT; i++) { 2158 if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) { 2159 bus_dmamap_unload(sc->sc_dmat, 2160 sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 2161 m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf); 2162 sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL; 2163 } 2164 } 2165 } 2166 2167 void 2168 re_setup_hw_im(struct rl_softc *sc) 2169 { 2170 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2171 2172 /* 2173 * Interrupt moderation 2174 * 2175 * 0xABCD 2176 * A - unknown (maybe TX related) 2177 * B - TX timer (unit: 25us) 2178 * C - unknown (maybe RX related) 2179 * D - RX timer (unit: 25us) 2180 * 2181 * 2182 * re(4)'s interrupt moderation is actually controlled by 2183 * two variables, like most other NICs (bge, bnx etc.) 
2184 * o timer 2185 * o number of packets [P] 2186 * 2187 * The logic relationship between these two variables is 2188 * similar to other NICs too: 2189 * if (timer expire || packets > [P]) 2190 * Interrupt is delivered 2191 * 2192 * Currently we only know how to set 'timer', but not 2193 * 'number of packets', which should be ~30, as far as I 2194 * tested (sink ~900Kpps, interrupt rate is 30KHz) 2195 */ 2196 CSR_WRITE_2(sc, RL_IM, 2197 RL_IM_RXTIME(sc->rl_rx_time) | 2198 RL_IM_TXTIME(sc->rl_tx_time) | 2199 RL_IM_MAGIC); 2200 } 2201 2202 void 2203 re_disable_hw_im(struct rl_softc *sc) 2204 { 2205 if (sc->rl_flags & RL_FLAG_HWIM) 2206 CSR_WRITE_2(sc, RL_IM, 0); 2207 } 2208 2209 void 2210 re_setup_sim_im(struct rl_softc *sc) 2211 { 2212 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2213 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */ 2214 else { 2215 u_int32_t ticks; 2216 2217 /* 2218 * Datasheet says tick decreases at bus speed, 2219 * but it seems the clock runs a little bit 2220 * faster, so we do some compensation here. 
2221 */ 2222 ticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5; 2223 CSR_WRITE_4(sc, RL_TIMERINT_8169, ticks); 2224 } 2225 CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */ 2226 sc->rl_flags |= RL_FLAG_TIMERINTR; 2227 } 2228 2229 void 2230 re_disable_sim_im(struct rl_softc *sc) 2231 { 2232 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2233 CSR_WRITE_4(sc, RL_TIMERINT, 0); 2234 else 2235 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0); 2236 sc->rl_flags &= ~RL_FLAG_TIMERINTR; 2237 } 2238 2239 void 2240 re_config_imtype(struct rl_softc *sc, int imtype) 2241 { 2242 switch (imtype) { 2243 case RL_IMTYPE_HW: 2244 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2245 /* FALLTHROUGH */ 2246 case RL_IMTYPE_NONE: 2247 sc->rl_intrs = RL_INTRS_CPLUS; 2248 sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW | 2249 RL_ISR_RX_OVERRUN; 2250 sc->rl_tx_ack = RL_ISR_TX_OK; 2251 break; 2252 2253 case RL_IMTYPE_SIM: 2254 sc->rl_intrs = RL_INTRS_TIMER; 2255 sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED; 2256 sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED; 2257 break; 2258 2259 default: 2260 panic("%s: unknown imtype %d", 2261 sc->sc_dev.dv_xname, imtype); 2262 } 2263 } 2264 2265 void 2266 re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype) 2267 { 2268 re_config_imtype(sc, imtype); 2269 2270 if (enable_intrs) 2271 CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs); 2272 else 2273 CSR_WRITE_2(sc, RL_IMR, 0); 2274 2275 switch (imtype) { 2276 case RL_IMTYPE_NONE: 2277 re_disable_sim_im(sc); 2278 re_disable_hw_im(sc); 2279 break; 2280 2281 case RL_IMTYPE_HW: 2282 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2283 re_disable_sim_im(sc); 2284 re_setup_hw_im(sc); 2285 break; 2286 2287 case RL_IMTYPE_SIM: 2288 re_disable_hw_im(sc); 2289 re_setup_sim_im(sc); 2290 break; 2291 2292 default: 2293 panic("%s: unknown imtype %d", 2294 sc->sc_dev.dv_xname, imtype); 2295 } 2296 } 2297 2298 #ifndef SMALL_KERNEL 2299 int 2300 re_wol(struct ifnet *ifp, int enable) 2301 { 2302 struct rl_softc *sc = ifp->if_softc; 2303 int i; 2304 u_int8_t val; 2305 struct 
re_wolcfg { 2306 u_int8_t enable; 2307 u_int8_t reg; 2308 u_int8_t bit; 2309 } re_wolcfg[] = { 2310 /* Always disable all wake events expect magic packet. */ 2311 { 0, RL_CFG5, RL_CFG5_WOL_UCAST }, 2312 { 0, RL_CFG5, RL_CFG5_WOL_MCAST }, 2313 { 0, RL_CFG5, RL_CFG5_WOL_BCAST }, 2314 { 1, RL_CFG3, RL_CFG3_WOL_MAGIC }, 2315 { 0, RL_CFG3, RL_CFG3_WOL_LINK } 2316 }; 2317 2318 if (enable) { 2319 if ((CSR_READ_1(sc, RL_CFG1) & RL_CFG1_PME) == 0) { 2320 printf("%s: power management is disabled, " 2321 "cannot do WOL\n", sc->sc_dev.dv_xname); 2322 return (ENOTSUP); 2323 } 2324 if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_AUXPWR) == 0) 2325 printf("%s: no auxiliary power, cannot do WOL from D3 " 2326 "(power-off) state\n", sc->sc_dev.dv_xname); 2327 } 2328 2329 re_iff(sc); 2330 2331 /* Temporarily enable write to configuration registers. */ 2332 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2333 2334 for (i = 0; i < nitems(re_wolcfg); i++) { 2335 val = CSR_READ_1(sc, re_wolcfg[i].reg); 2336 if (enable && re_wolcfg[i].enable) 2337 val |= re_wolcfg[i].bit; 2338 else 2339 val &= ~re_wolcfg[i].bit; 2340 CSR_WRITE_1(sc, re_wolcfg[i].reg, val); 2341 } 2342 2343 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2344 2345 return (0); 2346 } 2347 #endif 2348