1 /* $OpenBSD: re.c,v 1.206 2020/07/10 13:26:37 patrick Exp $ */ 2 /* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */ 3 /* 4 * Copyright (c) 1997, 1998-2003 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 /* 36 * Realtek 8139C+/8169/8169S/8110S PCI NIC driver 37 * 38 * Written by Bill Paul <wpaul@windriver.com> 39 * Senior Networking Software Engineer 40 * Wind River Systems 41 */ 42 43 /* 44 * This driver is designed to support Realtek's next generation of 45 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 46 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 47 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 48 * 49 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 50 * with the older 8139 family, however it also supports a special 51 * C+ mode of operation that provides several new performance enhancing 52 * features. These include: 53 * 54 * o Descriptor based DMA mechanism. Each descriptor represents 55 * a single packet fragment. Data buffers may be aligned on 56 * any byte boundary. 57 * 58 * o 64-bit DMA 59 * 60 * o TCP/IP checksum offload for both RX and TX 61 * 62 * o High and normal priority transmit DMA rings 63 * 64 * o VLAN tag insertion and extraction 65 * 66 * o TCP large send (segmentation offload) 67 * 68 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 69 * programming API is fairly straightforward. The RX filtering, EEPROM 70 * access and PHY access is the same as it is on the older 8139 series 71 * chips. 72 * 73 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the 74 * same programming API and feature set as the 8139C+ with the following 75 * differences and additions: 76 * 77 * o 1000Mbps mode 78 * 79 * o Jumbo frames 80 * 81 * o GMII and TBI ports/registers for interfacing with copper 82 * or fiber PHYs 83 * 84 * o RX and TX DMA rings can have up to 1024 descriptors 85 * (the 8139C+ allows a maximum of 64) 86 * 87 * o Slight differences in register layout from the 8139C+ 88 * 89 * The TX start and timer interrupt registers are at different locations 90 * on the 8169 than they are on the 8139C+. Also, the status word in the 91 * RX descriptor has a slightly different bit layout. The 8169 does not 92 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 93 * copper gigE PHY. 94 * 95 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 96 * (the 'S' stands for 'single-chip'). These devices have the same 97 * programming API as the older 8169, but also have some vendor-specific 98 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 99 * part designed to be pin-compatible with the Realtek 8100 10/100 chip. 100 * 101 * This driver takes advantage of the RX and TX checksum offload and 102 * VLAN tag insertion/extraction features. It also implements TX 103 * interrupt moderation using the timer interrupt registers, which 104 * significantly reduces TX interrupt load. There is also support 105 * for jumbo frames, however the 8169/8169S/8110S can not transmit 106 * jumbo frames larger than 7440, so the max MTU possible with this 107 * driver is 7422 bytes. 108 */ 109 110 #include "bpfilter.h" 111 #include "vlan.h" 112 113 #include <sys/param.h> 114 #include <sys/endian.h> 115 #include <sys/systm.h> 116 #include <sys/sockio.h> 117 #include <sys/mbuf.h> 118 #include <sys/malloc.h> 119 #include <sys/kernel.h> 120 #include <sys/device.h> 121 #include <sys/timeout.h> 122 #include <sys/socket.h> 123 #include <sys/atomic.h> 124 125 #include <machine/bus.h> 126 127 #include <net/if.h> 128 #include <net/if_media.h> 129 130 #include <netinet/in.h> 131 #include <netinet/ip.h> 132 #include <netinet/if_ether.h> 133 134 #if NBPFILTER > 0 135 #include <net/bpf.h> 136 #endif 137 138 #include <dev/mii/mii.h> 139 #include <dev/mii/miivar.h> 140 141 #include <dev/pci/pcidevs.h> 142 143 #include <dev/ic/rtl81x9reg.h> 144 #include <dev/ic/revar.h> 145 146 #ifdef RE_DEBUG 147 int redebug = 0; 148 #define DPRINTF(x) do { if (redebug) printf x; } while (0) 149 #else 150 #define DPRINTF(x) 151 #endif 152 153 static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t); 154 155 int re_encap(struct rl_softc *, unsigned int, struct mbuf *); 156 157 int re_newbuf(struct rl_softc *); 158 int re_rx_list_init(struct rl_softc *); 159 void re_rx_list_fill(struct rl_softc *); 160 int re_tx_list_init(struct rl_softc *); 161 int re_rxeof(struct rl_softc *); 162 int re_txeof(struct rl_softc *); 163 void re_tick(void *); 164 void re_start(struct ifqueue *); 165 void re_txstart(void *); 166 int re_ioctl(struct ifnet *, u_long, caddr_t); 167 void re_watchdog(struct ifnet *); 168 int re_ifmedia_upd(struct ifnet *); 169 void re_ifmedia_sts(struct ifnet *, struct ifmediareq *); 170 171 void re_set_jumbo(struct rl_softc *); 172 173 void re_eeprom_putbyte(struct rl_softc *, int); 174 void re_eeprom_getword(struct rl_softc *, int, u_int16_t *); 175 void re_read_eeprom(struct rl_softc *, caddr_t, int, int); 176 177 int re_gmii_readreg(struct device *, int, int); 178 void re_gmii_writereg(struct device *, int, int, int); 179 180 int re_miibus_readreg(struct device *, int, int); 181 void re_miibus_writereg(struct device *, int, int, int); 182 void re_miibus_statchg(struct device *); 183 184 void re_iff(struct rl_softc *); 185 186 void re_setup_hw_im(struct rl_softc *); 187 void re_setup_sim_im(struct rl_softc *); 188 void re_disable_hw_im(struct rl_softc *); 189 void re_disable_sim_im(struct rl_softc *); 190 void re_config_imtype(struct rl_softc *, int); 191 void re_setup_intr(struct rl_softc *, int, int); 192 #ifndef SMALL_KERNEL 193 int re_wol(struct ifnet*, int); 194 #endif 195 196 void in_delayed_cksum(struct mbuf *); 197 198 struct cfdriver re_cd = { 199 0, "re", DV_IFNET 200 }; 201 202 extern char *hw_vendor, *hw_prod; 203 204 #define EE_SET(x) \ 205 CSR_WRITE_1(sc, RL_EECMD, \ 206 CSR_READ_1(sc, RL_EECMD) | x) 207 208 #define EE_CLR(x) \ 209 CSR_WRITE_1(sc, RL_EECMD, \ 210 CSR_READ_1(sc, RL_EECMD) & ~x) 211 212 #define RL_FRAMELEN(mtu) \ 213 (mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + \ 214 ETHER_VLAN_ENCAP_LEN) 215 216 static const struct re_revision { 217 u_int32_t re_chipid; 218 const char *re_name; 219 } re_revisions[] = { 220 { RL_HWREV_8100, "RTL8100" }, 221 { RL_HWREV_8100E, "RTL8100E" }, 222 { RL_HWREV_8100E_SPIN2, "RTL8100E 2" }, 223 { RL_HWREV_8101, "RTL8101" }, 224 { RL_HWREV_8101E, "RTL8101E" }, 225 { RL_HWREV_8102E, "RTL8102E" }, 226 { RL_HWREV_8106E, "RTL8106E" }, 227 { RL_HWREV_8401E, "RTL8401E" }, 228 { RL_HWREV_8402, "RTL8402" }, 229 { RL_HWREV_8411, "RTL8411" }, 230 { RL_HWREV_8411B, "RTL8411B" }, 231 { RL_HWREV_8102EL, "RTL8102EL" }, 232 { RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" }, 233 { RL_HWREV_8103E, "RTL8103E" }, 234 { RL_HWREV_8110S, "RTL8110S" }, 235 { RL_HWREV_8139CPLUS, "RTL8139C+" }, 236 { RL_HWREV_8168B_SPIN1, "RTL8168 1" }, 237 { RL_HWREV_8168B_SPIN2, "RTL8168 2" }, 238 { RL_HWREV_8168B_SPIN3, "RTL8168 3" }, 239 { RL_HWREV_8168C, "RTL8168C/8111C" }, 240 { RL_HWREV_8168C_SPIN2, "RTL8168C/8111C" }, 241 { RL_HWREV_8168CP, "RTL8168CP/8111CP" }, 242 { RL_HWREV_8168F, "RTL8168F/8111F" }, 243 { RL_HWREV_8168G, "RTL8168G/8111G" }, 244 { RL_HWREV_8168GU, "RTL8168GU/8111GU" }, 245 { RL_HWREV_8168H, "RTL8168H/8111H" }, 246 { RL_HWREV_8105E, "RTL8105E" }, 247 { RL_HWREV_8105E_SPIN1, "RTL8105E" }, 248 { RL_HWREV_8168D, "RTL8168D/8111D" }, 249 { RL_HWREV_8168DP, "RTL8168DP/8111DP" }, 250 { RL_HWREV_8168E, "RTL8168E/8111E" }, 251 { RL_HWREV_8168E_VL, "RTL8168E/8111E-VL" }, 252 { RL_HWREV_8168EP, "RTL8168EP/8111EP" }, 253 { RL_HWREV_8169, "RTL8169" }, 254 { RL_HWREV_8169_8110SB, "RTL8169/8110SB" }, 255 { RL_HWREV_8169_8110SBL, "RTL8169SBL" }, 256 { RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" }, 257 { RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" }, 258 { RL_HWREV_8169S, "RTL8169S" }, 259 260 { 0, NULL } 261 }; 262 263 264 static inline void 265 re_set_bufaddr(struct rl_desc *d, bus_addr_t addr) 266 { 267 d->rl_bufaddr_lo = htole32((uint32_t)addr); 268 if (sizeof(bus_addr_t) == sizeof(uint64_t)) 269 d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32); 270 else 271 d->rl_bufaddr_hi = 0; 272 } 273 274 /* 275 * Send a read command and address to the EEPROM, check for ACK. 276 */ 277 void 278 re_eeprom_putbyte(struct rl_softc *sc, int addr) 279 { 280 int d, i; 281 282 d = addr | (RL_9346_READ << sc->rl_eewidth); 283 284 /* 285 * Feed in each bit and strobe the clock. 286 */ 287 288 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { 289 if (d & i) 290 EE_SET(RL_EE_DATAIN); 291 else 292 EE_CLR(RL_EE_DATAIN); 293 DELAY(100); 294 EE_SET(RL_EE_CLK); 295 DELAY(150); 296 EE_CLR(RL_EE_CLK); 297 DELAY(100); 298 } 299 } 300 301 /* 302 * Read a word of data stored in the EEPROM at address 'addr.' 303 */ 304 void 305 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) 306 { 307 int i; 308 u_int16_t word = 0; 309 310 /* 311 * Send address of word we want to read. 312 */ 313 re_eeprom_putbyte(sc, addr); 314 315 /* 316 * Start reading bits from EEPROM. 317 */ 318 for (i = 0x8000; i; i >>= 1) { 319 EE_SET(RL_EE_CLK); 320 DELAY(100); 321 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 322 word |= i; 323 EE_CLR(RL_EE_CLK); 324 DELAY(100); 325 } 326 327 *dest = word; 328 } 329 330 /* 331 * Read a sequence of words from the EEPROM. 332 */ 333 void 334 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) 335 { 336 int i; 337 u_int16_t word = 0, *ptr; 338 339 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 340 341 DELAY(100); 342 343 for (i = 0; i < cnt; i++) { 344 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 345 re_eeprom_getword(sc, off + i, &word); 346 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 347 ptr = (u_int16_t *)(dest + (i * 2)); 348 *ptr = word; 349 } 350 351 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 352 } 353 354 int 355 re_gmii_readreg(struct device *self, int phy, int reg) 356 { 357 struct rl_softc *sc = (struct rl_softc *)self; 358 u_int32_t rval; 359 int i; 360 361 if (phy != 7) 362 return (0); 363 364 /* Let the rgephy driver read the GMEDIASTAT register */ 365 366 if (reg == RL_GMEDIASTAT) { 367 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 368 return (rval); 369 } 370 371 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 372 373 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 374 rval = CSR_READ_4(sc, RL_PHYAR); 375 if (rval & RL_PHYAR_BUSY) 376 break; 377 DELAY(25); 378 } 379 380 if (i == RL_PHY_TIMEOUT) { 381 printf ("%s: PHY read failed\n", sc->sc_dev.dv_xname); 382 return (0); 383 } 384 385 DELAY(20); 386 387 return (rval & RL_PHYAR_PHYDATA); 388 } 389 390 void 391 re_gmii_writereg(struct device *dev, int phy, int reg, int data) 392 { 393 struct rl_softc *sc = (struct rl_softc *)dev; 394 u_int32_t rval; 395 int i; 396 397 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 398 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 399 400 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 401 rval = CSR_READ_4(sc, RL_PHYAR); 402 if (!(rval & RL_PHYAR_BUSY)) 403 break; 404 DELAY(25); 405 } 406 407 if (i == RL_PHY_TIMEOUT) 408 printf ("%s: PHY write failed\n", sc->sc_dev.dv_xname); 409 410 DELAY(20); 411 } 412 413 int 414 re_miibus_readreg(struct device *dev, int phy, int reg) 415 { 416 struct rl_softc *sc = (struct rl_softc *)dev; 417 u_int16_t rval = 0; 418 u_int16_t re8139_reg = 0; 419 int s; 420 421 s = splnet(); 422 423 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 424 rval = re_gmii_readreg(dev, phy, reg); 425 splx(s); 426 return (rval); 427 } 428 429 /* Pretend the internal PHY is only at address 0 */ 430 if (phy) { 431 splx(s); 432 return (0); 433 } 434 switch(reg) { 435 case MII_BMCR: 436 re8139_reg = RL_BMCR; 437 break; 438 case MII_BMSR: 439 re8139_reg = RL_BMSR; 440 break; 441 case MII_ANAR: 442 re8139_reg = RL_ANAR; 443 break; 444 case MII_ANER: 445 re8139_reg = RL_ANER; 446 break; 447 case MII_ANLPAR: 448 re8139_reg = RL_LPAR; 449 break; 450 case MII_PHYIDR1: 451 case MII_PHYIDR2: 452 splx(s); 453 return (0); 454 /* 455 * Allow the rlphy driver to read the media status 456 * register. If we have a link partner which does not 457 * support NWAY, this is the register which will tell 458 * us the results of parallel detection. 459 */ 460 case RL_MEDIASTAT: 461 rval = CSR_READ_1(sc, RL_MEDIASTAT); 462 splx(s); 463 return (rval); 464 default: 465 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 466 splx(s); 467 return (0); 468 } 469 rval = CSR_READ_2(sc, re8139_reg); 470 if (re8139_reg == RL_BMCR) { 471 /* 8139C+ has different bit layout. */ 472 rval &= ~(BMCR_LOOP | BMCR_ISO); 473 } 474 splx(s); 475 return (rval); 476 } 477 478 void 479 re_miibus_writereg(struct device *dev, int phy, int reg, int data) 480 { 481 struct rl_softc *sc = (struct rl_softc *)dev; 482 u_int16_t re8139_reg = 0; 483 int s; 484 485 s = splnet(); 486 487 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 488 re_gmii_writereg(dev, phy, reg, data); 489 splx(s); 490 return; 491 } 492 493 /* Pretend the internal PHY is only at address 0 */ 494 if (phy) { 495 splx(s); 496 return; 497 } 498 switch(reg) { 499 case MII_BMCR: 500 re8139_reg = RL_BMCR; 501 /* 8139C+ has different bit layout. */ 502 data &= ~(BMCR_LOOP | BMCR_ISO); 503 break; 504 case MII_BMSR: 505 re8139_reg = RL_BMSR; 506 break; 507 case MII_ANAR: 508 re8139_reg = RL_ANAR; 509 break; 510 case MII_ANER: 511 re8139_reg = RL_ANER; 512 break; 513 case MII_ANLPAR: 514 re8139_reg = RL_LPAR; 515 break; 516 case MII_PHYIDR1: 517 case MII_PHYIDR2: 518 splx(s); 519 return; 520 break; 521 default: 522 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 523 splx(s); 524 return; 525 } 526 CSR_WRITE_2(sc, re8139_reg, data); 527 splx(s); 528 } 529 530 void 531 re_miibus_statchg(struct device *dev) 532 { 533 struct rl_softc *sc = (struct rl_softc *)dev; 534 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 535 struct mii_data *mii = &sc->sc_mii; 536 537 if ((ifp->if_flags & IFF_RUNNING) == 0) 538 return; 539 540 sc->rl_flags &= ~RL_FLAG_LINK; 541 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 542 (IFM_ACTIVE | IFM_AVALID)) { 543 switch (IFM_SUBTYPE(mii->mii_media_active)) { 544 case IFM_10_T: 545 case IFM_100_TX: 546 sc->rl_flags |= RL_FLAG_LINK; 547 break; 548 case IFM_1000_T: 549 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) 550 break; 551 sc->rl_flags |= RL_FLAG_LINK; 552 break; 553 default: 554 break; 555 } 556 } 557 558 /* 559 * Realtek controllers do not provide an interface to 560 * Tx/Rx MACs for resolved speed, duplex and flow-control 561 * parameters. 562 */ 563 } 564 565 void 566 re_iff(struct rl_softc *sc) 567 { 568 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 569 int h = 0; 570 u_int32_t hashes[2]; 571 u_int32_t rxfilt; 572 struct arpcom *ac = &sc->sc_arpcom; 573 struct ether_multi *enm; 574 struct ether_multistep step; 575 576 rxfilt = CSR_READ_4(sc, RL_RXCFG); 577 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | 578 RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI); 579 ifp->if_flags &= ~IFF_ALLMULTI; 580 581 /* 582 * Always accept frames destined to our station address. 583 * Always accept broadcast frames. 584 */ 585 rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; 586 587 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 588 ifp->if_flags |= IFF_ALLMULTI; 589 rxfilt |= RL_RXCFG_RX_MULTI; 590 if (ifp->if_flags & IFF_PROMISC) 591 rxfilt |= RL_RXCFG_RX_ALLPHYS; 592 hashes[0] = hashes[1] = 0xFFFFFFFF; 593 } else { 594 rxfilt |= RL_RXCFG_RX_MULTI; 595 /* Program new filter. */ 596 bzero(hashes, sizeof(hashes)); 597 598 ETHER_FIRST_MULTI(step, ac, enm); 599 while (enm != NULL) { 600 h = ether_crc32_be(enm->enm_addrlo, 601 ETHER_ADDR_LEN) >> 26; 602 603 if (h < 32) 604 hashes[0] |= (1 << h); 605 else 606 hashes[1] |= (1 << (h - 32)); 607 608 ETHER_NEXT_MULTI(step, enm); 609 } 610 } 611 612 /* 613 * For some unfathomable reason, Realtek decided to reverse 614 * the order of the multicast hash registers in the PCI Express 615 * parts. This means we have to write the hash pattern in reverse 616 * order for those devices. 617 */ 618 if (sc->rl_flags & RL_FLAG_PCIE) { 619 CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1])); 620 CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0])); 621 } else { 622 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 623 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 624 } 625 626 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 627 } 628 629 void 630 re_reset(struct rl_softc *sc) 631 { 632 int i; 633 634 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 635 636 for (i = 0; i < RL_TIMEOUT; i++) { 637 DELAY(10); 638 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 639 break; 640 } 641 if (i == RL_TIMEOUT) 642 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 643 644 if (sc->rl_flags & RL_FLAG_MACRESET) 645 CSR_WRITE_1(sc, RL_LDPS, 1); 646 } 647 648 /* 649 * Attach the interface. Allocate softc structures, do ifmedia 650 * setup and ethernet/BPF attach. 651 */ 652 int 653 re_attach(struct rl_softc *sc, const char *intrstr) 654 { 655 u_char eaddr[ETHER_ADDR_LEN]; 656 u_int16_t as[ETHER_ADDR_LEN / 2]; 657 struct ifnet *ifp; 658 u_int16_t re_did = 0; 659 int error = 0, i; 660 const struct re_revision *rr; 661 const char *re_name = NULL; 662 663 sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 664 665 switch (sc->sc_hwrev) { 666 case RL_HWREV_8139CPLUS: 667 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; 668 sc->rl_max_mtu = RL_MTU; 669 break; 670 case RL_HWREV_8100E: 671 case RL_HWREV_8100E_SPIN2: 672 case RL_HWREV_8101E: 673 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; 674 sc->rl_max_mtu = RL_MTU; 675 break; 676 case RL_HWREV_8103E: 677 sc->rl_flags |= RL_FLAG_MACSLEEP; 678 /* FALLTHROUGH */ 679 case RL_HWREV_8102E: 680 case RL_HWREV_8102EL: 681 case RL_HWREV_8102EL_SPIN1: 682 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 683 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | 684 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 685 sc->rl_max_mtu = RL_MTU; 686 break; 687 case RL_HWREV_8401E: 688 case RL_HWREV_8105E: 689 case RL_HWREV_8105E_SPIN1: 690 case RL_HWREV_8106E: 691 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 692 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 693 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 694 sc->rl_max_mtu = RL_MTU; 695 break; 696 case RL_HWREV_8402: 697 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 698 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 699 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | 700 RL_FLAG_CMDSTOP_WAIT_TXQ; 701 sc->rl_max_mtu = RL_MTU; 702 break; 703 case RL_HWREV_8168B_SPIN1: 704 case RL_HWREV_8168B_SPIN2: 705 sc->rl_flags |= RL_FLAG_WOLRXENB; 706 /* FALLTHROUGH */ 707 case RL_HWREV_8168B_SPIN3: 708 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; 709 sc->rl_max_mtu = RL_MTU; 710 break; 711 case RL_HWREV_8168C_SPIN2: 712 sc->rl_flags |= RL_FLAG_MACSLEEP; 713 /* FALLTHROUGH */ 714 case RL_HWREV_8168C: 715 case RL_HWREV_8168CP: 716 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 717 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 718 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; 719 sc->rl_max_mtu = RL_JUMBO_MTU_6K; 720 break; 721 case RL_HWREV_8168D: 722 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 723 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 724 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 725 RL_FLAG_WOL_MANLINK; 726 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 727 break; 728 case RL_HWREV_8168DP: 729 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 730 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD | 731 RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK; 732 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 733 break; 734 case RL_HWREV_8168E: 735 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 736 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 737 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 738 RL_FLAG_WOL_MANLINK; 739 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 740 break; 741 case RL_HWREV_8168E_VL: 742 sc->rl_flags |= RL_FLAG_EARLYOFF | RL_FLAG_PHYWAKE | RL_FLAG_PAR | 743 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 744 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | 745 RL_FLAG_WOL_MANLINK; 746 sc->rl_max_mtu = RL_JUMBO_MTU_6K; 747 break; 748 case RL_HWREV_8168F: 749 sc->rl_flags |= RL_FLAG_EARLYOFF; 750 /* FALLTHROUGH */ 751 case RL_HWREV_8411: 752 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 753 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 754 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | 755 RL_FLAG_WOL_MANLINK; 756 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 757 break; 758 case RL_HWREV_8168EP: 759 case RL_HWREV_8168G: 760 case RL_HWREV_8168GU: 761 case RL_HWREV_8168H: 762 case RL_HWREV_8411B: 763 if (sc->sc_product == PCI_PRODUCT_REALTEK_RT8101E) { 764 /* RTL8106EUS */ 765 sc->rl_flags |= RL_FLAG_FASTETHER; 766 sc->rl_max_mtu = RL_MTU; 767 } else { 768 sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; 769 sc->rl_max_mtu = RL_JUMBO_MTU_9K; 770 } 771 772 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 773 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 774 RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ | 775 RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 776 break; 777 case RL_HWREV_8169_8110SB: 778 case RL_HWREV_8169_8110SBL: 779 case RL_HWREV_8169_8110SCd: 780 case RL_HWREV_8169_8110SCe: 781 sc->rl_flags |= RL_FLAG_PHYWAKE; 782 /* FALLTHROUGH */ 783 case RL_HWREV_8169: 784 case RL_HWREV_8169S: 785 case RL_HWREV_8110S: 786 sc->rl_flags |= RL_FLAG_MACRESET; 787 sc->rl_max_mtu = RL_JUMBO_MTU_7K; 788 break; 789 default: 790 break; 791 } 792 793 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) { 794 sc->rl_cfg0 = RL_8139_CFG0; 795 sc->rl_cfg1 = RL_8139_CFG1; 796 sc->rl_cfg2 = 0; 797 sc->rl_cfg3 = RL_8139_CFG3; 798 sc->rl_cfg4 = RL_8139_CFG4; 799 sc->rl_cfg5 = RL_8139_CFG5; 800 } else { 801 sc->rl_cfg0 = RL_CFG0; 802 sc->rl_cfg1 = RL_CFG1; 803 sc->rl_cfg2 = RL_CFG2; 804 sc->rl_cfg3 = RL_CFG3; 805 sc->rl_cfg4 = RL_CFG4; 806 sc->rl_cfg5 = RL_CFG5; 807 } 808 809 /* Reset the adapter. */ 810 re_reset(sc); 811 812 sc->rl_tx_time = 5; /* 125us */ 813 sc->rl_rx_time = 2; /* 50us */ 814 if (sc->rl_flags & RL_FLAG_PCIE) 815 sc->rl_sim_time = 75; /* 75us */ 816 else 817 sc->rl_sim_time = 125; /* 125us */ 818 sc->rl_imtype = RL_IMTYPE_SIM; /* simulated interrupt moderation */ 819 820 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 821 sc->rl_bus_speed = 33; /* XXX */ 822 else if (sc->rl_flags & RL_FLAG_PCIE) 823 sc->rl_bus_speed = 125; 824 else { 825 u_int8_t cfg2; 826 827 cfg2 = CSR_READ_1(sc, sc->rl_cfg2); 828 switch (cfg2 & RL_CFG2_PCI_MASK) { 829 case RL_CFG2_PCI_33MHZ: 830 sc->rl_bus_speed = 33; 831 break; 832 case RL_CFG2_PCI_66MHZ: 833 sc->rl_bus_speed = 66; 834 break; 835 default: 836 printf("%s: unknown bus speed, assume 33MHz\n", 837 sc->sc_dev.dv_xname); 838 sc->rl_bus_speed = 33; 839 break; 840 } 841 842 if (cfg2 & RL_CFG2_PCI_64BIT) 843 sc->rl_flags |= RL_FLAG_PCI64; 844 } 845 846 re_config_imtype(sc, sc->rl_imtype); 847 848 if (sc->rl_flags & RL_FLAG_PAR) { 849 /* 850 * XXX Should have a better way to extract station 851 * address from EEPROM. 852 */ 853 for (i = 0; i < ETHER_ADDR_LEN; i++) 854 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 855 } else { 856 sc->rl_eewidth = RL_9356_ADDR_LEN; 857 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 858 if (re_did != 0x8129) 859 sc->rl_eewidth = RL_9346_ADDR_LEN; 860 861 /* 862 * Get station address from the EEPROM. 863 */ 864 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 865 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 866 as[i] = letoh16(as[i]); 867 bcopy(as, eaddr, ETHER_ADDR_LEN); 868 } 869 870 /* 871 * Set RX length mask, TX poll request register 872 * and descriptor count. 873 */ 874 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) { 875 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 876 sc->rl_txstart = RL_TXSTART; 877 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; 878 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; 879 sc->rl_ldata.rl_tx_ndescs = RL_8139_NTXSEGS; 880 } else { 881 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 882 sc->rl_txstart = RL_GTXSTART; 883 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; 884 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; 885 sc->rl_ldata.rl_tx_ndescs = RL_8169_NTXSEGS; 886 } 887 888 bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 889 890 for (rr = re_revisions; rr->re_name != NULL; rr++) { 891 if (rr->re_chipid == sc->sc_hwrev) 892 re_name = rr->re_name; 893 } 894 895 if (re_name == NULL) 896 printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16); 897 else 898 printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16); 899 900 printf(", %s, address %s\n", intrstr, 901 ether_sprintf(sc->sc_arpcom.ac_enaddr)); 902 903 /* Allocate DMA'able memory for the TX ring */ 904 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc), 905 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1, 906 &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT | 907 BUS_DMA_ZERO)) != 0) { 908 printf("%s: can't allocate tx listseg, error = %d\n", 909 sc->sc_dev.dv_xname, error); 910 goto fail_0; 911 } 912 913 /* Load the map for the TX ring. */ 914 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg, 915 sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc), 916 (caddr_t *)&sc->rl_ldata.rl_tx_list, 917 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 918 printf("%s: can't map tx list, error = %d\n", 919 sc->sc_dev.dv_xname, error); 920 goto fail_1; 921 } 922 923 if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1, 924 RL_TX_LIST_SZ(sc), 0, 0, 925 &sc->rl_ldata.rl_tx_list_map)) != 0) { 926 printf("%s: can't create tx list map, error = %d\n", 927 sc->sc_dev.dv_xname, error); 928 goto fail_2; 929 } 930 931 if ((error = bus_dmamap_load(sc->sc_dmat, 932 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 933 RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { 934 printf("%s: can't load tx list, error = %d\n", 935 sc->sc_dev.dv_xname, error); 936 goto fail_3; 937 } 938 939 /* Create DMA maps for TX buffers */ 940 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 941 error = bus_dmamap_create(sc->sc_dmat, 942 RL_JUMBO_FRAMELEN, sc->rl_ldata.rl_tx_ndescs, 943 RL_JUMBO_FRAMELEN, 0, 0, 944 &sc->rl_ldata.rl_txq[i].txq_dmamap); 945 if (error) { 946 printf("%s: can't create DMA map for TX\n", 947 sc->sc_dev.dv_xname); 948 goto fail_4; 949 } 950 } 951 952 /* Allocate DMA'able memory for the RX ring */ 953 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), 954 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1, 955 &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT | 956 BUS_DMA_ZERO)) != 0) { 957 printf("%s: can't allocate rx listnseg, error = %d\n", 958 sc->sc_dev.dv_xname, error); 959 goto fail_4; 960 } 961 962 /* Load the map for the RX ring. */ 963 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg, 964 sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ(sc), 965 (caddr_t *)&sc->rl_ldata.rl_rx_list, 966 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 967 printf("%s: can't map rx list, error = %d\n", 968 sc->sc_dev.dv_xname, error); 969 goto fail_5; 970 971 } 972 973 if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), 1, 974 RL_RX_DMAMEM_SZ(sc), 0, 0, 975 &sc->rl_ldata.rl_rx_list_map)) != 0) { 976 printf("%s: can't create rx list map, error = %d\n", 977 sc->sc_dev.dv_xname, error); 978 goto fail_6; 979 } 980 981 if ((error = bus_dmamap_load(sc->sc_dmat, 982 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 983 RL_RX_DMAMEM_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { 984 printf("%s: can't load rx list, error = %d\n", 985 sc->sc_dev.dv_xname, error); 986 goto fail_7; 987 } 988 989 /* Create DMA maps for RX buffers */ 990 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 991 error = bus_dmamap_create(sc->sc_dmat, 992 RL_FRAMELEN(sc->rl_max_mtu), 1, 993 RL_FRAMELEN(sc->rl_max_mtu), 0, 0, 994 &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 995 if (error) { 996 printf("%s: can't create DMA map for RX\n", 997 sc->sc_dev.dv_xname); 998 goto fail_8; 999 } 1000 } 1001 1002 ifp = &sc->sc_arpcom.ac_if; 1003 ifp->if_softc = sc; 1004 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 1005 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1006 ifp->if_xflags = IFXF_MPSAFE; 1007 ifp->if_ioctl = re_ioctl; 1008 ifp->if_qstart = re_start; 1009 ifp->if_watchdog = re_watchdog; 1010 ifp->if_hardmtu = sc->rl_max_mtu; 1011 ifq_set_maxlen(&ifp->if_snd, sc->rl_ldata.rl_tx_desc_cnt); 1012 1013 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 | 1014 IFCAP_CSUM_UDPv4; 1015 1016 /* 1017 * RTL8168/8111C generates wrong IP checksummed frame if the 1018 * packet has IP options so disable TX IP checksum offloading. 1019 */ 1020 switch (sc->sc_hwrev) { 1021 case RL_HWREV_8168C: 1022 case RL_HWREV_8168C_SPIN2: 1023 case RL_HWREV_8168CP: 1024 break; 1025 default: 1026 ifp->if_capabilities |= IFCAP_CSUM_IPv4; 1027 } 1028 1029 #if NVLAN > 0 1030 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1031 #endif 1032 1033 #ifndef SMALL_KERNEL 1034 ifp->if_capabilities |= IFCAP_WOL; 1035 ifp->if_wol = re_wol; 1036 re_wol(ifp, 0); 1037 #endif 1038 timeout_set(&sc->timer_handle, re_tick, sc); 1039 task_set(&sc->rl_start, re_txstart, sc); 1040 1041 /* Take PHY out of power down mode. */ 1042 if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) { 1043 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); 1044 if (sc->sc_hwrev == RL_HWREV_8401E) 1045 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); 1046 } 1047 if (sc->rl_flags & RL_FLAG_PHYWAKE) { 1048 re_gmii_writereg((struct device *)sc, 1, 0x1f, 0); 1049 re_gmii_writereg((struct device *)sc, 1, 0x0e, 0); 1050 } 1051 1052 /* Do MII setup */ 1053 sc->sc_mii.mii_ifp = ifp; 1054 sc->sc_mii.mii_readreg = re_miibus_readreg; 1055 sc->sc_mii.mii_writereg = re_miibus_writereg; 1056 sc->sc_mii.mii_statchg = re_miibus_statchg; 1057 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd, 1058 re_ifmedia_sts); 1059 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 1060 MII_OFFSET_ANY, MIIF_DOPAUSE); 1061 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 1062 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 1063 ifmedia_add(&sc->sc_mii.mii_media, 1064 IFM_ETHER|IFM_NONE, 0, NULL); 1065 ifmedia_set(&sc->sc_mii.mii_media, 1066 IFM_ETHER|IFM_NONE); 1067 } else 1068 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 1069 1070 /* 1071 * Call MI attach routine. 1072 */ 1073 if_attach(ifp); 1074 ether_ifattach(ifp); 1075 1076 return (0); 1077 1078 fail_8: 1079 /* Destroy DMA maps for RX buffers. */ 1080 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1081 if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL) 1082 bus_dmamap_destroy(sc->sc_dmat, 1083 sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 1084 } 1085 1086 /* Free DMA'able memory for the RX ring. */ 1087 bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map); 1088 fail_7: 1089 bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map); 1090 fail_6: 1091 bus_dmamem_unmap(sc->sc_dmat, 1092 (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ(sc)); 1093 fail_5: 1094 bus_dmamem_free(sc->sc_dmat, 1095 &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg); 1096 1097 fail_4: 1098 /* Destroy DMA maps for TX buffers. */ 1099 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1100 if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL) 1101 bus_dmamap_destroy(sc->sc_dmat, 1102 sc->rl_ldata.rl_txq[i].txq_dmamap); 1103 } 1104 1105 /* Free DMA'able memory for the TX ring. */ 1106 bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map); 1107 fail_3: 1108 bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map); 1109 fail_2: 1110 bus_dmamem_unmap(sc->sc_dmat, 1111 (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc)); 1112 fail_1: 1113 bus_dmamem_free(sc->sc_dmat, 1114 &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg); 1115 fail_0: 1116 return (1); 1117 } 1118 1119 1120 int 1121 re_newbuf(struct rl_softc *sc) 1122 { 1123 struct mbuf *m; 1124 bus_dmamap_t map; 1125 struct rl_desc *d; 1126 struct rl_rxsoft *rxs; 1127 u_int32_t cmdstat; 1128 int error, idx; 1129 1130 m = MCLGETI(NULL, M_DONTWAIT, NULL, RL_FRAMELEN(sc->rl_max_mtu)); 1131 if (!m) 1132 return (ENOBUFS); 1133 1134 /* 1135 * Initialize mbuf length fields and fixup 1136 * alignment so that the frame payload is 1137 * longword aligned on strict alignment archs. 1138 */ 1139 m->m_len = m->m_pkthdr.len = RL_FRAMELEN(sc->rl_max_mtu); 1140 m->m_data += RE_ETHER_ALIGN; 1141 1142 idx = sc->rl_ldata.rl_rx_prodidx; 1143 rxs = &sc->rl_ldata.rl_rxsoft[idx]; 1144 map = rxs->rxs_dmamap; 1145 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1146 BUS_DMA_READ|BUS_DMA_NOWAIT); 1147 if (error) { 1148 m_freem(m); 1149 return (ENOBUFS); 1150 } 1151 1152 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1153 BUS_DMASYNC_PREREAD); 1154 1155 d = &sc->rl_ldata.rl_rx_list[idx]; 1156 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1157 cmdstat = letoh32(d->rl_cmdstat); 1158 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1159 if (cmdstat & RL_RDESC_STAT_OWN) { 1160 printf("%s: tried to map busy RX descriptor\n", 1161 sc->sc_dev.dv_xname); 1162 m_freem(m); 1163 return (ENOBUFS); 1164 } 1165 1166 rxs->rxs_mbuf = m; 1167 1168 d->rl_vlanctl = 0; 1169 cmdstat = map->dm_segs[0].ds_len; 1170 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1171 cmdstat |= RL_RDESC_CMD_EOR; 1172 re_set_bufaddr(d, map->dm_segs[0].ds_addr); 1173 d->rl_cmdstat = htole32(cmdstat); 1174 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1175 cmdstat |= RL_RDESC_CMD_OWN; 1176 d->rl_cmdstat = htole32(cmdstat); 1177 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1178 1179 sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx); 1180 1181 return (0); 1182 } 1183 1184 1185 int 1186 re_tx_list_init(struct rl_softc *sc) 1187 { 1188 int i; 1189 1190 memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc)); 1191 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1192 sc->rl_ldata.rl_txq[i].txq_mbuf = NULL; 1193 } 1194 1195 bus_dmamap_sync(sc->sc_dmat, 1196 sc->rl_ldata.rl_tx_list_map, 0, 1197 sc->rl_ldata.rl_tx_list_map->dm_mapsize, 1198 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1199 sc->rl_ldata.rl_txq_prodidx = 0; 1200 sc->rl_ldata.rl_txq_considx = 0; 1201 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; 1202 sc->rl_ldata.rl_tx_nextfree = 0; 1203 1204 return (0); 1205 } 1206 1207 int 1208 re_rx_list_init(struct rl_softc *sc) 1209 { 1210 bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ(sc)); 1211 1212 sc->rl_ldata.rl_rx_prodidx = 0; 1213 sc->rl_ldata.rl_rx_considx = 0; 1214 sc->rl_head = sc->rl_tail = NULL; 1215 1216 if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2, 1217 sc->rl_ldata.rl_rx_desc_cnt - 1); 1218 re_rx_list_fill(sc); 1219 1220 return (0); 1221 } 1222 1223 void 1224 re_rx_list_fill(struct rl_softc *sc) 1225 { 1226 u_int slots; 1227 1228 for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring, 1229 sc->rl_ldata.rl_rx_desc_cnt); 1230 slots > 0; slots--) { 1231 if (re_newbuf(sc) == ENOBUFS) 1232 break; 1233 } 1234 if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots); 1235 } 1236 1237 /* 1238 * RX handler for C+ and 8169. For the gigE chips, we support 1239 * the reception of jumbo frames that have been fragmented 1240 * across multiple 2K mbuf cluster buffers. 1241 */ 1242 int 1243 re_rxeof(struct rl_softc *sc) 1244 { 1245 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 1246 struct mbuf *m; 1247 struct ifnet *ifp; 1248 int i, total_len, rx = 0; 1249 struct rl_desc *cur_rx; 1250 struct rl_rxsoft *rxs; 1251 u_int32_t rxstat, rxvlan; 1252 1253 ifp = &sc->sc_arpcom.ac_if; 1254 1255 for (i = sc->rl_ldata.rl_rx_considx; 1256 if_rxr_inuse(&sc->rl_ldata.rl_rx_ring) > 0; 1257 i = RL_NEXT_RX_DESC(sc, i)) { 1258 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 1259 RL_RXDESCSYNC(sc, i, 1260 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1261 rxstat = letoh32(cur_rx->rl_cmdstat); 1262 rxvlan = letoh32(cur_rx->rl_vlanctl); 1263 RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD); 1264 if ((rxstat & RL_RDESC_STAT_OWN) != 0) 1265 break; 1266 total_len = rxstat & sc->rl_rxlenmask; 1267 rxs = &sc->rl_ldata.rl_rxsoft[i]; 1268 m = rxs->rxs_mbuf; 1269 rxs->rxs_mbuf = NULL; 1270 if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1); 1271 rx = 1; 1272 1273 /* Invalidate the RX mbuf and unload its map */ 1274 1275 bus_dmamap_sync(sc->sc_dmat, 1276 rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize, 1277 BUS_DMASYNC_POSTREAD); 1278 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1279 1280 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 1281 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) != 1282 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) { 1283 continue; 1284 } else if (!(rxstat & RL_RDESC_STAT_EOF)) { 1285 m->m_len = RL_FRAMELEN(sc->rl_max_mtu); 1286 if (sc->rl_head == NULL) 1287 sc->rl_head = sc->rl_tail = m; 1288 else { 1289 m->m_flags &= ~M_PKTHDR; 1290 sc->rl_tail->m_next = m; 1291 sc->rl_tail = m; 1292 } 1293 continue; 1294 } 1295 1296 /* 1297 * NOTE: for the 8139C+, the frame length field 1298 * is always 12 bits in size, but for the gigE chips, 1299 * it is 13 bits (since the max RX frame length is 16K). 1300 * Unfortunately, all 32 bits in the status word 1301 * were already used, so to make room for the extra 1302 * length bit, Realtek took out the 'frame alignment 1303 * error' bit and shifted the other status bits 1304 * over one slot. The OWN, EOR, FS and LS bits are 1305 * still in the same places. We have already extracted 1306 * the frame length and checked the OWN bit, so rather 1307 * than using an alternate bit mapping, we shift the 1308 * status bits one space to the right so we can evaluate 1309 * them using the 8169 status as though it was in the 1310 * same format as that of the 8139C+. 1311 */ 1312 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) 1313 rxstat >>= 1; 1314 1315 /* 1316 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be 1317 * set, but if CRC is clear, it will still be a valid frame. 1318 */ 1319 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0 && 1320 !(rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 && 1321 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT))) { 1322 ifp->if_ierrors++; 1323 /* 1324 * If this is part of a multi-fragment packet, 1325 * discard all the pieces. 1326 */ 1327 if (sc->rl_head != NULL) { 1328 m_freem(sc->rl_head); 1329 sc->rl_head = sc->rl_tail = NULL; 1330 } 1331 continue; 1332 } 1333 1334 if (sc->rl_head != NULL) { 1335 m->m_len = total_len % RL_FRAMELEN(sc->rl_max_mtu); 1336 if (m->m_len == 0) 1337 m->m_len = RL_FRAMELEN(sc->rl_max_mtu); 1338 /* 1339 * Special case: if there's 4 bytes or less 1340 * in this buffer, the mbuf can be discarded: 1341 * the last 4 bytes is the CRC, which we don't 1342 * care about anyway. 1343 */ 1344 if (m->m_len <= ETHER_CRC_LEN) { 1345 sc->rl_tail->m_len -= 1346 (ETHER_CRC_LEN - m->m_len); 1347 m_freem(m); 1348 } else { 1349 m->m_len -= ETHER_CRC_LEN; 1350 m->m_flags &= ~M_PKTHDR; 1351 sc->rl_tail->m_next = m; 1352 } 1353 m = sc->rl_head; 1354 sc->rl_head = sc->rl_tail = NULL; 1355 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1356 } else 1357 m->m_pkthdr.len = m->m_len = 1358 (total_len - ETHER_CRC_LEN); 1359 1360 /* Do RX checksumming */ 1361 1362 if (sc->rl_flags & RL_FLAG_DESCV2) { 1363 /* Check IP header checksum */ 1364 if ((rxvlan & RL_RDESC_IPV4) && 1365 !(rxstat & RL_RDESC_STAT_IPSUMBAD)) 1366 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 1367 1368 /* Check TCP/UDP checksum */ 1369 if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) && 1370 (((rxstat & RL_RDESC_STAT_TCP) && 1371 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1372 ((rxstat & RL_RDESC_STAT_UDP) && 1373 !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))) 1374 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 1375 M_UDP_CSUM_IN_OK; 1376 } else { 1377 /* Check IP header checksum */ 1378 if ((rxstat & RL_RDESC_STAT_PROTOID) && 1379 !(rxstat & RL_RDESC_STAT_IPSUMBAD)) 1380 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 1381 1382 /* Check TCP/UDP checksum */ 1383 if ((RL_TCPPKT(rxstat) && 1384 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1385 (RL_UDPPKT(rxstat) && 1386 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) 1387 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 1388 M_UDP_CSUM_IN_OK; 1389 } 1390 #if NVLAN > 0 1391 if (rxvlan & RL_RDESC_VLANCTL_TAG) { 1392 m->m_pkthdr.ether_vtag = 1393 ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)); 1394 m->m_flags |= M_VLANTAG; 1395 } 1396 #endif 1397 1398 ml_enqueue(&ml, m); 1399 } 1400 1401 if (ifiq_input(&ifp->if_rcv, &ml)) 1402 if_rxr_livelocked(&sc->rl_ldata.rl_rx_ring); 1403 1404 sc->rl_ldata.rl_rx_considx = i; 1405 re_rx_list_fill(sc); 1406 1407 1408 return (rx); 1409 } 1410 1411 int 1412 re_txeof(struct rl_softc *sc) 1413 { 1414 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1415 struct rl_txq *txq; 1416 uint32_t txstat; 1417 unsigned int prod, cons; 1418 unsigned int idx; 1419 int free = 0; 1420 1421 prod = sc->rl_ldata.rl_txq_prodidx; 1422 cons = sc->rl_ldata.rl_txq_considx; 1423 1424 while (prod != cons) { 1425 txq = &sc->rl_ldata.rl_txq[cons]; 1426 1427 idx = txq->txq_descidx; 1428 RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD); 1429 txstat = letoh32(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat); 1430 RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1431 if (ISSET(txstat, RL_TDESC_CMD_OWN)) { 1432 free = 2; 1433 break; 1434 } 1435 1436 bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 1437 0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1438 bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap); 1439 m_freem(txq->txq_mbuf); 1440 txq->txq_mbuf = NULL; 1441 1442 if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT)) 1443 ifp->if_collisions++; 1444 if (txstat & RL_TDESC_STAT_TXERRSUM) 1445 ifp->if_oerrors++; 1446 1447 cons = RL_NEXT_TX_DESC(sc, idx); 1448 free = 1; 1449 } 1450 1451 if (free == 0) 1452 return (0); 1453 1454 sc->rl_ldata.rl_txq_considx = cons; 1455 1456 /* 1457 * Some chips will ignore a second TX request issued while an 1458 * existing transmission is in progress. If the transmitter goes 1459 * idle but there are still packets waiting to be sent, we need 1460 * to restart the channel here to flush them out. This only 1461 * seems to be required with the PCIe devices. 1462 */ 1463 if (ifq_is_oactive(&ifp->if_snd)) 1464 ifq_restart(&ifp->if_snd); 1465 else if (free == 2) 1466 ifq_serialize(&ifp->if_snd, &sc->rl_start); 1467 else 1468 ifp->if_timer = 0; 1469 1470 return (1); 1471 } 1472 1473 void 1474 re_tick(void *xsc) 1475 { 1476 struct rl_softc *sc = xsc; 1477 struct mii_data *mii; 1478 int s; 1479 1480 mii = &sc->sc_mii; 1481 1482 s = splnet(); 1483 1484 mii_tick(mii); 1485 1486 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 1487 re_miibus_statchg(&sc->sc_dev); 1488 1489 splx(s); 1490 1491 timeout_add_sec(&sc->timer_handle, 1); 1492 } 1493 1494 int 1495 re_intr(void *arg) 1496 { 1497 struct rl_softc *sc = arg; 1498 struct ifnet *ifp; 1499 u_int16_t status; 1500 int claimed = 0, rx, tx; 1501 1502 ifp = &sc->sc_arpcom.ac_if; 1503 1504 if (!(ifp->if_flags & IFF_RUNNING)) 1505 return (0); 1506 1507 /* Disable interrupts. */ 1508 CSR_WRITE_2(sc, RL_IMR, 0); 1509 1510 rx = tx = 0; 1511 status = CSR_READ_2(sc, RL_ISR); 1512 /* If the card has gone away the read returns 0xffff. */ 1513 if (status == 0xffff) 1514 return (0); 1515 if (status) 1516 CSR_WRITE_2(sc, RL_ISR, status); 1517 1518 if (status & RL_ISR_TIMEOUT_EXPIRED) 1519 claimed = 1; 1520 1521 if (status & RL_INTRS_CPLUS) { 1522 if (status & 1523 (sc->rl_rx_ack | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW)) { 1524 rx |= re_rxeof(sc); 1525 claimed = 1; 1526 } 1527 1528 if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) { 1529 tx |= re_txeof(sc); 1530 claimed = 1; 1531 } 1532 1533 if (status & RL_ISR_SYSTEM_ERR) { 1534 KERNEL_LOCK(); 1535 re_init(ifp); 1536 KERNEL_UNLOCK(); 1537 claimed = 1; 1538 } 1539 } 1540 1541 if (sc->rl_imtype == RL_IMTYPE_SIM) { 1542 if (sc->rl_timerintr) { 1543 if ((tx | rx) == 0) { 1544 /* 1545 * Nothing needs to be processed, fallback 1546 * to use TX/RX interrupts. 1547 */ 1548 re_setup_intr(sc, 1, RL_IMTYPE_NONE); 1549 1550 /* 1551 * Recollect, mainly to avoid the possible 1552 * race introduced by changing interrupt 1553 * masks. 1554 */ 1555 re_rxeof(sc); 1556 re_txeof(sc); 1557 } else 1558 CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */ 1559 } else if (tx | rx) { 1560 /* 1561 * Assume that using simulated interrupt moderation 1562 * (hardware timer based) could reduce the interrupt 1563 * rate. 1564 */ 1565 re_setup_intr(sc, 1, RL_IMTYPE_SIM); 1566 } 1567 } 1568 1569 CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs); 1570 1571 return (claimed); 1572 } 1573 1574 int 1575 re_encap(struct rl_softc *sc, unsigned int idx, struct mbuf *m) 1576 { 1577 struct rl_txq *txq; 1578 bus_dmamap_t map; 1579 int error, seg, nsegs, curidx, lastidx, pad; 1580 int off; 1581 struct ip *ip; 1582 struct rl_desc *d; 1583 u_int32_t cmdstat, vlanctl = 0, csum_flags = 0; 1584 1585 /* 1586 * Set up checksum offload. Note: checksum offload bits must 1587 * appear in all descriptors of a multi-descriptor transmit 1588 * attempt. This is according to testing done with an 8169 1589 * chip. This is a requirement. 1590 */ 1591 1592 /* 1593 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading 1594 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/ 1595 * RL_TDESC_CMD_UDPCSUM does not take affect. 1596 */ 1597 1598 if ((sc->rl_flags & RL_FLAG_JUMBOV2) && 1599 m->m_pkthdr.len > RL_MTU && 1600 (m->m_pkthdr.csum_flags & 1601 (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) { 1602 struct mbuf mh, *mp; 1603 1604 mp = m_getptr(m, ETHER_HDR_LEN, &off); 1605 mh.m_flags = 0; 1606 mh.m_data = mtod(mp, caddr_t) + off; 1607 mh.m_next = mp->m_next; 1608 mh.m_pkthdr.len = mp->m_pkthdr.len - ETHER_HDR_LEN; 1609 mh.m_len = mp->m_len - off; 1610 ip = (struct ip *)mh.m_data; 1611 1612 if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 1613 ip->ip_sum = in_cksum(&mh, sizeof(struct ip)); 1614 if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) 1615 in_delayed_cksum(&mh); 1616 1617 m->m_pkthdr.csum_flags &= 1618 ~(M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT); 1619 } 1620 1621 if ((m->m_pkthdr.csum_flags & 1622 (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) { 1623 if (sc->rl_flags & RL_FLAG_DESCV2) { 1624 vlanctl |= RL_TDESC_CMD_IPCSUMV2; 1625 if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) 1626 vlanctl |= RL_TDESC_CMD_TCPCSUMV2; 1627 if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) 1628 vlanctl |= RL_TDESC_CMD_UDPCSUMV2; 1629 } else { 1630 csum_flags |= RL_TDESC_CMD_IPCSUM; 1631 if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) 1632 csum_flags |= RL_TDESC_CMD_TCPCSUM; 1633 if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) 1634 csum_flags |= RL_TDESC_CMD_UDPCSUM; 1635 } 1636 } 1637 1638 txq = &sc->rl_ldata.rl_txq[idx]; 1639 map = txq->txq_dmamap; 1640 1641 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1642 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1643 switch (error) { 1644 case 0: 1645 break; 1646 1647 case EFBIG: 1648 if (m_defrag(m, M_DONTWAIT) == 0 && 1649 bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1650 BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0) 1651 break; 1652 1653 /* FALLTHROUGH */ 1654 default: 1655 return (0); 1656 } 1657 1658 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1659 BUS_DMASYNC_PREWRITE); 1660 1661 nsegs = map->dm_nsegs; 1662 pad = 0; 1663 1664 /* 1665 * With some of the RealTek chips, using the checksum offload 1666 * support in conjunction with the autopadding feature results 1667 * in the transmission of corrupt frames. For example, if we 1668 * need to send a really small IP fragment that's less than 60 1669 * bytes in size, and IP header checksumming is enabled, the 1670 * resulting ethernet frame that appears on the wire will 1671 * have garbled payload. To work around this, if TX IP checksum 1672 * offload is enabled, we always manually pad short frames out 1673 * to the minimum ethernet frame size. 1674 */ 1675 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && 1676 m->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && 1677 (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) != 0) { 1678 pad = 1; 1679 nsegs++; 1680 } 1681 1682 /* 1683 * Set up hardware VLAN tagging. Note: vlan tag info must 1684 * appear in all descriptors of a multi-descriptor 1685 * transmission attempt. 1686 */ 1687 #if NVLAN > 0 1688 if (m->m_flags & M_VLANTAG) 1689 vlanctl |= swap16(m->m_pkthdr.ether_vtag) | 1690 RL_TDESC_VLANCTL_TAG; 1691 #endif 1692 1693 /* 1694 * Map the segment array into descriptors. Note that we set the 1695 * start-of-frame and end-of-frame markers for either TX or RX, but 1696 * they really only have meaning in the TX case. (In the RX case, 1697 * it's the chip that tells us where packets begin and end.) 1698 * We also keep track of the end of the ring and set the 1699 * end-of-ring bits as needed, and we set the ownership bits 1700 * in all except the very first descriptor. (The caller will 1701 * set this descriptor later when it start transmission or 1702 * reception.) 1703 */ 1704 curidx = idx; 1705 cmdstat = RL_TDESC_CMD_SOF; 1706 1707 for (seg = 0; seg < map->dm_nsegs; seg++) { 1708 d = &sc->rl_ldata.rl_tx_list[curidx]; 1709 1710 RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE); 1711 1712 d->rl_vlanctl = htole32(vlanctl); 1713 re_set_bufaddr(d, map->dm_segs[seg].ds_addr); 1714 cmdstat |= csum_flags | map->dm_segs[seg].ds_len; 1715 1716 if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1) 1717 cmdstat |= RL_TDESC_CMD_EOR; 1718 1719 d->rl_cmdstat = htole32(cmdstat); 1720 1721 RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE); 1722 1723 lastidx = curidx; 1724 cmdstat = RL_TDESC_CMD_OWN; 1725 curidx = RL_NEXT_TX_DESC(sc, curidx); 1726 } 1727 1728 if (pad) { 1729 d = &sc->rl_ldata.rl_tx_list[curidx]; 1730 1731 RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE); 1732 1733 d->rl_vlanctl = htole32(vlanctl); 1734 re_set_bufaddr(d, RL_TXPADDADDR(sc)); 1735 cmdstat = csum_flags | 1736 RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF | 1737 (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len); 1738 1739 if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1) 1740 cmdstat |= RL_TDESC_CMD_EOR; 1741 1742 d->rl_cmdstat = htole32(cmdstat); 1743 1744 RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE); 1745 1746 lastidx = curidx; 1747 } 1748 1749 /* d is already pointing at the last descriptor */ 1750 d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 1751 1752 /* Transfer ownership of packet to the chip. */ 1753 d = &sc->rl_ldata.rl_tx_list[idx]; 1754 1755 RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE); 1756 d->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN); 1757 RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE); 1758 1759 /* update info of TX queue and descriptors */ 1760 txq->txq_mbuf = m; 1761 txq->txq_descidx = lastidx; 1762 1763 return (nsegs); 1764 } 1765 1766 void 1767 re_txstart(void *xsc) 1768 { 1769 struct rl_softc *sc = xsc; 1770 1771 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 1772 } 1773 1774 /* 1775 * Main transmit routine for C+ and gigE NICs. 1776 */ 1777 1778 void 1779 re_start(struct ifqueue *ifq) 1780 { 1781 struct ifnet *ifp = ifq->ifq_if; 1782 struct rl_softc *sc = ifp->if_softc; 1783 struct mbuf *m; 1784 unsigned int idx; 1785 unsigned int free, used; 1786 int post = 0; 1787 1788 if (!ISSET(sc->rl_flags, RL_FLAG_LINK)) { 1789 ifq_purge(ifq); 1790 return; 1791 } 1792 1793 free = sc->rl_ldata.rl_txq_considx; 1794 idx = sc->rl_ldata.rl_txq_prodidx; 1795 if (free <= idx) 1796 free += sc->rl_ldata.rl_tx_desc_cnt; 1797 free -= idx; 1798 1799 for (;;) { 1800 if (sc->rl_ldata.rl_tx_ndescs >= free + 2) { 1801 ifq_set_oactive(ifq); 1802 break; 1803 } 1804 1805 m = ifq_dequeue(ifq); 1806 if (m == NULL) 1807 break; 1808 1809 used = re_encap(sc, idx, m); 1810 if (used == 0) { 1811 m_freem(m); 1812 continue; 1813 } 1814 1815 #if NBPFILTER > 0 1816 if (ifp->if_bpf) 1817 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1818 #endif 1819 1820 KASSERT(used <= free); 1821 free -= used; 1822 1823 idx += used; 1824 if (idx >= sc->rl_ldata.rl_tx_desc_cnt) 1825 idx -= sc->rl_ldata.rl_tx_desc_cnt; 1826 1827 post = 1; 1828 } 1829 1830 if (post == 0) 1831 return; 1832 1833 ifp->if_timer = 5; 1834 sc->rl_ldata.rl_txq_prodidx = idx; 1835 ifq_serialize(ifq, &sc->rl_start); 1836 } 1837 1838 int 1839 re_init(struct ifnet *ifp) 1840 { 1841 struct rl_softc *sc = ifp->if_softc; 1842 u_int16_t cfg; 1843 uint32_t rxcfg; 1844 int s; 1845 union { 1846 u_int32_t align_dummy; 1847 u_char eaddr[ETHER_ADDR_LEN]; 1848 } eaddr; 1849 1850 s = splnet(); 1851 1852 /* 1853 * Cancel pending I/O and free all RX/TX buffers. 1854 */ 1855 re_stop(ifp); 1856 1857 /* Put controller into known state. */ 1858 re_reset(sc); 1859 1860 /* 1861 * Enable C+ RX and TX mode, as well as VLAN stripping and 1862 * RX checksum offload. We must configure the C+ register 1863 * before all others. 1864 */ 1865 cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW | 1866 RL_CPLUSCMD_RXCSUM_ENB; 1867 1868 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1869 cfg |= RL_CPLUSCMD_VLANSTRIP; 1870 1871 if (sc->rl_flags & RL_FLAG_MACSTAT) 1872 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 1873 else 1874 cfg |= RL_CPLUSCMD_RXENB; 1875 1876 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 1877 1878 /* 1879 * Init our MAC address. Even though the chipset 1880 * documentation doesn't mention it, we need to enter "Config 1881 * register write enable" mode to modify the ID registers. 1882 */ 1883 bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN); 1884 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 1885 CSR_WRITE_4(sc, RL_IDR4, 1886 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 1887 CSR_WRITE_4(sc, RL_IDR0, 1888 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 1889 /* 1890 * Default on PC Engines APU1 is to have all LEDs off unless 1891 * there is network activity. Override to provide a link status 1892 * LED. 1893 */ 1894 if (sc->sc_hwrev == RL_HWREV_8168E && 1895 hw_vendor != NULL && hw_prod != NULL && 1896 strcmp(hw_vendor, "PC Engines") == 0 && 1897 strcmp(hw_prod, "APU") == 0) { 1898 CSR_SETBIT_1(sc, RL_CFG4, RL_CFG4_CUSTOM_LED); 1899 CSR_WRITE_1(sc, RL_LEDSEL, RL_LED_LINK | RL_LED_ACT << 4); 1900 } 1901 /* 1902 * Protect config register again 1903 */ 1904 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1905 1906 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 1907 re_set_jumbo(sc); 1908 1909 /* 1910 * For C+ mode, initialize the RX descriptors and mbufs. 1911 */ 1912 re_rx_list_init(sc); 1913 re_tx_list_init(sc); 1914 1915 /* 1916 * Load the addresses of the RX and TX lists into the chip. 1917 */ 1918 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 1919 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1920 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 1921 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1922 1923 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 1924 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1925 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 1926 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1927 1928 if (sc->rl_flags & RL_FLAG_RXDV_GATED) 1929 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & 1930 ~0x00080000); 1931 1932 /* 1933 * Set the initial TX and RX configuration. 1934 */ 1935 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 1936 1937 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 1938 1939 rxcfg = RL_RXCFG_CONFIG; 1940 if (sc->rl_flags & RL_FLAG_EARLYOFF) 1941 rxcfg |= RL_RXCFG_EARLYOFF; 1942 else if (sc->rl_flags & RL_FLAG_EARLYOFFV2) 1943 rxcfg |= RL_RXCFG_EARLYOFFV2; 1944 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 1945 1946 /* 1947 * Enable transmit and receive. 1948 */ 1949 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB); 1950 1951 /* Program promiscuous mode and multicast filters. */ 1952 re_iff(sc); 1953 1954 /* 1955 * Enable interrupts. 1956 */ 1957 re_setup_intr(sc, 1, sc->rl_imtype); 1958 CSR_WRITE_2(sc, RL_ISR, sc->rl_intrs); 1959 1960 /* Start RX/TX process. */ 1961 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 1962 1963 /* 1964 * For 8169 gigE NICs, set the max allowed RX packet 1965 * size so we can receive jumbo frames. 1966 */ 1967 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 1968 if (sc->rl_flags & RL_FLAG_PCIE && 1969 (sc->rl_flags & RL_FLAG_JUMBOV2) == 0) 1970 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); 1971 else 1972 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 1973 } 1974 1975 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | 1976 RL_CFG1_DRVLOAD); 1977 1978 ifp->if_flags |= IFF_RUNNING; 1979 ifq_clr_oactive(&ifp->if_snd); 1980 1981 splx(s); 1982 1983 sc->rl_flags &= ~RL_FLAG_LINK; 1984 mii_mediachg(&sc->sc_mii); 1985 1986 timeout_add_sec(&sc->timer_handle, 1); 1987 1988 return (0); 1989 } 1990 1991 /* 1992 * Set media options. 1993 */ 1994 int 1995 re_ifmedia_upd(struct ifnet *ifp) 1996 { 1997 struct rl_softc *sc; 1998 1999 sc = ifp->if_softc; 2000 2001 return (mii_mediachg(&sc->sc_mii)); 2002 } 2003 2004 /* 2005 * Report current media status. 2006 */ 2007 void 2008 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2009 { 2010 struct rl_softc *sc; 2011 2012 sc = ifp->if_softc; 2013 2014 mii_pollstat(&sc->sc_mii); 2015 ifmr->ifm_active = sc->sc_mii.mii_media_active; 2016 ifmr->ifm_status = sc->sc_mii.mii_media_status; 2017 } 2018 2019 int 2020 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2021 { 2022 struct rl_softc *sc = ifp->if_softc; 2023 struct ifreq *ifr = (struct ifreq *) data; 2024 int s, error = 0; 2025 2026 s = splnet(); 2027 2028 switch(command) { 2029 case SIOCSIFADDR: 2030 ifp->if_flags |= IFF_UP; 2031 if (!(ifp->if_flags & IFF_RUNNING)) 2032 re_init(ifp); 2033 break; 2034 case SIOCSIFFLAGS: 2035 if (ifp->if_flags & IFF_UP) { 2036 if (ifp->if_flags & IFF_RUNNING) 2037 error = ENETRESET; 2038 else 2039 re_init(ifp); 2040 } else { 2041 if (ifp->if_flags & IFF_RUNNING) 2042 re_stop(ifp); 2043 } 2044 break; 2045 case SIOCGIFMEDIA: 2046 case SIOCSIFMEDIA: 2047 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 2048 break; 2049 case SIOCGIFRXR: 2050 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data, 2051 NULL, RL_FRAMELEN(sc->rl_max_mtu), &sc->rl_ldata.rl_rx_ring); 2052 break; 2053 default: 2054 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data); 2055 } 2056 2057 if (error == ENETRESET) { 2058 if (ifp->if_flags & IFF_RUNNING) 2059 re_iff(sc); 2060 error = 0; 2061 } 2062 2063 splx(s); 2064 return (error); 2065 } 2066 2067 void 2068 re_watchdog(struct ifnet *ifp) 2069 { 2070 struct rl_softc *sc; 2071 int s; 2072 2073 sc = ifp->if_softc; 2074 s = splnet(); 2075 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 2076 2077 re_init(ifp); 2078 2079 splx(s); 2080 } 2081 2082 /* 2083 * Stop the adapter and free any mbufs allocated to the 2084 * RX and TX lists. 2085 */ 2086 void 2087 re_stop(struct ifnet *ifp) 2088 { 2089 struct rl_softc *sc; 2090 int i; 2091 2092 sc = ifp->if_softc; 2093 2094 ifp->if_timer = 0; 2095 sc->rl_flags &= ~RL_FLAG_LINK; 2096 sc->rl_timerintr = 0; 2097 2098 timeout_del(&sc->timer_handle); 2099 ifp->if_flags &= ~IFF_RUNNING; 2100 2101 /* 2102 * Disable accepting frames to put RX MAC into idle state. 2103 * Otherwise it's possible to get frames while stop command 2104 * execution is in progress and controller can DMA the frame 2105 * to already freed RX buffer during that period. 2106 */ 2107 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & 2108 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_INDIV | 2109 RL_RXCFG_RX_MULTI)); 2110 2111 if (sc->rl_flags & RL_FLAG_WAIT_TXPOLL) { 2112 for (i = RL_TIMEOUT; i > 0; i--) { 2113 if ((CSR_READ_1(sc, sc->rl_txstart) & 2114 RL_TXSTART_START) == 0) 2115 break; 2116 DELAY(20); 2117 } 2118 if (i == 0) 2119 printf("%s: stopping TX poll timed out!\n", 2120 sc->sc_dev.dv_xname); 2121 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2122 } else if (sc->rl_flags & RL_FLAG_CMDSTOP) { 2123 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | 2124 RL_CMD_RX_ENB); 2125 if (sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) { 2126 for (i = RL_TIMEOUT; i > 0; i--) { 2127 if ((CSR_READ_4(sc, RL_TXCFG) & 2128 RL_TXCFG_QUEUE_EMPTY) != 0) 2129 break; 2130 DELAY(100); 2131 } 2132 if (i == 0) 2133 printf("%s: stopping TXQ timed out!\n", 2134 sc->sc_dev.dv_xname); 2135 } 2136 } else 2137 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2138 DELAY(1000); 2139 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2140 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 2141 2142 intr_barrier(sc->sc_ih); 2143 ifq_barrier(&ifp->if_snd); 2144 2145 ifq_clr_oactive(&ifp->if_snd); 2146 mii_down(&sc->sc_mii); 2147 2148 if (sc->rl_head != NULL) { 2149 m_freem(sc->rl_head); 2150 sc->rl_head = sc->rl_tail = NULL; 2151 } 2152 2153 /* Free the TX list buffers. */ 2154 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 2155 if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) { 2156 bus_dmamap_unload(sc->sc_dmat, 2157 sc->rl_ldata.rl_txq[i].txq_dmamap); 2158 m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf); 2159 sc->rl_ldata.rl_txq[i].txq_mbuf = NULL; 2160 } 2161 } 2162 2163 /* Free the RX list buffers. */ 2164 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2165 if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) { 2166 bus_dmamap_unload(sc->sc_dmat, 2167 sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 2168 m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf); 2169 sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL; 2170 } 2171 } 2172 } 2173 2174 void 2175 re_setup_hw_im(struct rl_softc *sc) 2176 { 2177 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2178 2179 /* 2180 * Interrupt moderation 2181 * 2182 * 0xABCD 2183 * A - unknown (maybe TX related) 2184 * B - TX timer (unit: 25us) 2185 * C - unknown (maybe RX related) 2186 * D - RX timer (unit: 25us) 2187 * 2188 * 2189 * re(4)'s interrupt moderation is actually controlled by 2190 * two variables, like most other NICs (bge, bnx etc.) 2191 * o timer 2192 * o number of packets [P] 2193 * 2194 * The logic relationship between these two variables is 2195 * similar to other NICs too: 2196 * if (timer expire || packets > [P]) 2197 * Interrupt is delivered 2198 * 2199 * Currently we only know how to set 'timer', but not 2200 * 'number of packets', which should be ~30, as far as I 2201 * tested (sink ~900Kpps, interrupt rate is 30KHz) 2202 */ 2203 CSR_WRITE_2(sc, RL_IM, 2204 RL_IM_RXTIME(sc->rl_rx_time) | 2205 RL_IM_TXTIME(sc->rl_tx_time) | 2206 RL_IM_MAGIC); 2207 } 2208 2209 void 2210 re_disable_hw_im(struct rl_softc *sc) 2211 { 2212 if (sc->rl_flags & RL_FLAG_HWIM) 2213 CSR_WRITE_2(sc, RL_IM, 0); 2214 } 2215 2216 void 2217 re_setup_sim_im(struct rl_softc *sc) 2218 { 2219 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2220 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */ 2221 else { 2222 u_int32_t nticks; 2223 2224 /* 2225 * Datasheet says tick decreases at bus speed, 2226 * but it seems the clock runs a little bit 2227 * faster, so we do some compensation here. 2228 */ 2229 nticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5; 2230 CSR_WRITE_4(sc, RL_TIMERINT_8169, nticks); 2231 } 2232 CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */ 2233 sc->rl_timerintr = 1; 2234 } 2235 2236 void 2237 re_disable_sim_im(struct rl_softc *sc) 2238 { 2239 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2240 CSR_WRITE_4(sc, RL_TIMERINT, 0); 2241 else 2242 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0); 2243 sc->rl_timerintr = 0; 2244 } 2245 2246 void 2247 re_config_imtype(struct rl_softc *sc, int imtype) 2248 { 2249 switch (imtype) { 2250 case RL_IMTYPE_HW: 2251 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2252 /* FALLTHROUGH */ 2253 case RL_IMTYPE_NONE: 2254 sc->rl_intrs = RL_INTRS_CPLUS; 2255 sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW | 2256 RL_ISR_RX_OVERRUN; 2257 sc->rl_tx_ack = RL_ISR_TX_OK; 2258 break; 2259 2260 case RL_IMTYPE_SIM: 2261 sc->rl_intrs = RL_INTRS_TIMER; 2262 sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED; 2263 sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED; 2264 break; 2265 2266 default: 2267 panic("%s: unknown imtype %d", 2268 sc->sc_dev.dv_xname, imtype); 2269 } 2270 } 2271 2272 void 2273 re_set_jumbo(struct rl_softc *sc) 2274 { 2275 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2276 CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) | 2277 RL_CFG3_JUMBO_EN0); 2278 2279 switch (sc->sc_hwrev) { 2280 case RL_HWREV_8168DP: 2281 break; 2282 case RL_HWREV_8168E: 2283 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) | 2284 RL_CFG4_8168E_JUMBO_EN1); 2285 break; 2286 default: 2287 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) | 2288 RL_CFG4_JUMBO_EN1); 2289 break; 2290 } 2291 2292 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2293 } 2294 2295 void 2296 re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype) 2297 { 2298 re_config_imtype(sc, imtype); 2299 2300 if (enable_intrs) 2301 CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs); 2302 else 2303 CSR_WRITE_2(sc, RL_IMR, 0); 2304 2305 switch (imtype) { 2306 case RL_IMTYPE_NONE: 2307 re_disable_sim_im(sc); 2308 re_disable_hw_im(sc); 2309 break; 2310 2311 case RL_IMTYPE_HW: 2312 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2313 re_disable_sim_im(sc); 2314 re_setup_hw_im(sc); 2315 break; 2316 2317 case RL_IMTYPE_SIM: 2318 re_disable_hw_im(sc); 2319 re_setup_sim_im(sc); 2320 break; 2321 2322 default: 2323 panic("%s: unknown imtype %d", 2324 sc->sc_dev.dv_xname, imtype); 2325 } 2326 } 2327 2328 #ifndef SMALL_KERNEL 2329 int 2330 re_wol(struct ifnet *ifp, int enable) 2331 { 2332 struct rl_softc *sc = ifp->if_softc; 2333 u_int8_t val; 2334 2335 if (enable) { 2336 if ((CSR_READ_1(sc, sc->rl_cfg1) & RL_CFG1_PME) == 0) { 2337 printf("%s: power management is disabled, " 2338 "cannot do WOL\n", sc->sc_dev.dv_xname); 2339 return (ENOTSUP); 2340 } 2341 if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_AUXPWR) == 0) 2342 printf("%s: no auxiliary power, cannot do WOL from D3 " 2343 "(power-off) state\n", sc->sc_dev.dv_xname); 2344 } 2345 2346 re_iff(sc); 2347 2348 /* Temporarily enable write to configuration registers. */ 2349 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2350 2351 /* Always disable all wake events except magic packet. */ 2352 if (enable) { 2353 val = CSR_READ_1(sc, sc->rl_cfg5); 2354 val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST | 2355 RL_CFG5_WOL_BCAST); 2356 CSR_WRITE_1(sc, sc->rl_cfg5, val); 2357 2358 val = CSR_READ_1(sc, sc->rl_cfg3); 2359 val |= RL_CFG3_WOL_MAGIC; 2360 val &= ~RL_CFG3_WOL_LINK; 2361 CSR_WRITE_1(sc, sc->rl_cfg3, val); 2362 } else { 2363 val = CSR_READ_1(sc, sc->rl_cfg5); 2364 val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST | 2365 RL_CFG5_WOL_BCAST); 2366 CSR_WRITE_1(sc, sc->rl_cfg5, val); 2367 2368 val = CSR_READ_1(sc, sc->rl_cfg3); 2369 val &= ~(RL_CFG3_WOL_MAGIC | RL_CFG3_WOL_LINK); 2370 CSR_WRITE_1(sc, sc->rl_cfg3, val); 2371 } 2372 2373 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2374 2375 return (0); 2376 } 2377 #endif 2378