/*	$OpenBSD: re.c,v 1.149 2014/03/13 13:11:30 brad Exp $	*/
/*	$FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $	*/
/*
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family, however it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access is the same as it is on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames, however the 8169/8169S/8110S can not transmit
 * jumbo frames larger than 7440, so the max MTU possible with this
 * driver is 7422 bytes.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ic/rtl81x9reg.h>
#include <dev/ic/revar.h>

#ifdef RE_DEBUG
int redebug = 0;
#define DPRINTF(x)	do { if (redebug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t);

int	re_encap(struct rl_softc *, struct mbuf *, int *);

int	re_newbuf(struct rl_softc *);
int	re_rx_list_init(struct rl_softc *);
void	re_rx_list_fill(struct rl_softc *);
int	re_tx_list_init(struct rl_softc *);
int	re_rxeof(struct rl_softc *);
int	re_txeof(struct rl_softc *);
void	re_tick(void *);
void	re_start(struct ifnet *);
int	re_ioctl(struct ifnet *, u_long, caddr_t);
void	re_watchdog(struct ifnet *);
int	re_ifmedia_upd(struct ifnet *);
void	re_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void	re_eeprom_putbyte(struct rl_softc *, int);
void	re_eeprom_getword(struct rl_softc *, int, u_int16_t *);
void	re_read_eeprom(struct rl_softc *, caddr_t, int, int);

int	re_gmii_readreg(struct device *, int, int);
void	re_gmii_writereg(struct device *, int, int, int);

int	re_miibus_readreg(struct device *, int, int);
void	re_miibus_writereg(struct device *, int, int, int);
void	re_miibus_statchg(struct device *);

void	re_iff(struct rl_softc *);

void	re_setup_hw_im(struct rl_softc *);
void	re_setup_sim_im(struct rl_softc *);
void	re_disable_hw_im(struct rl_softc *);
void	re_disable_sim_im(struct rl_softc *);
void	re_config_imtype(struct rl_softc *, int);
void	re_setup_intr(struct rl_softc *, int, int);
#ifndef SMALL_KERNEL
int	re_wol(struct ifnet*, int);
#endif

struct cfdriver re_cd = {
	0, "re", DV_IFNET
};

#define EE_SET(x)						\
	CSR_WRITE_1(sc, RL_EECMD,				\
	    CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)						\
	CSR_WRITE_1(sc, RL_EECMD,				\
	    CSR_READ_1(sc, RL_EECMD) & ~x)

static const struct re_revision {
	u_int32_t		re_chipid;
	const char		*re_name;
} re_revisions[] = {
	{ RL_HWREV_8100,	"RTL8100" },
	{ RL_HWREV_8100E_SPIN1,	"RTL8100E 1" },
	{ RL_HWREV_8100E_SPIN2,	"RTL8100E 2" },
	{ RL_HWREV_8101,	"RTL8101" },
	{ RL_HWREV_8101E,	"RTL8101E" },
	{ RL_HWREV_8102E,	"RTL8102E" },
	{ RL_HWREV_8106E,	"RTL8106E" },
	{ RL_HWREV_8106E_SPIN1,	"RTL8106E" },
	{ RL_HWREV_8401E,	"RTL8401E" },
	{ RL_HWREV_8402,	"RTL8402" },
	{ RL_HWREV_8411,	"RTL8411" },
	{ RL_HWREV_8102EL,	"RTL8102EL" },
	{ RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" },
	{ RL_HWREV_8103E,	"RTL8103E" },
	{ RL_HWREV_8110S,	"RTL8110S" },
	{ RL_HWREV_8139CPLUS,	"RTL8139C+" },
	{ RL_HWREV_8168_SPIN1,	"RTL8168 1" },
	{ RL_HWREV_8168_SPIN2,	"RTL8168 2" },
	{ RL_HWREV_8168_SPIN3,	"RTL8168 3" },
	{ RL_HWREV_8168C,	"RTL8168C/8111C" },
	{ RL_HWREV_8168C_SPIN2,	"RTL8168C/8111C" },
	{ RL_HWREV_8168CP,	"RTL8168CP/8111CP" },
	{ RL_HWREV_8168F,	"RTL8168F/8111F" },
	{ RL_HWREV_8168G,	"RTL8168G/8111G" },
	{ RL_HWREV_8168G_SPIN1,	"RTL8168G/8111G" },
	{ RL_HWREV_8168G_SPIN2,	"RTL8168G/8111G" },
	{ RL_HWREV_8168G_SPIN4,	"RTL8168G/8111G" },
	{ RL_HWREV_8105E,	"RTL8105E" },
	{ RL_HWREV_8105E_SPIN1,	"RTL8105E" },
	{ RL_HWREV_8168D,	"RTL8168D/8111D" },
	{ RL_HWREV_8168DP,	"RTL8168DP/8111DP" },
	{ RL_HWREV_8168E,	"RTL8168E/8111E" },
	{ RL_HWREV_8168E_VL,	"RTL8168E/8111E-VL" },
	{ RL_HWREV_8169,	"RTL8169" },
	{ RL_HWREV_8169_8110SB,	"RTL8169/8110SB" },
	{ RL_HWREV_8169_8110SBL, "RTL8169SBL" },
	{ RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" },
	{ RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" },
	{ RL_HWREV_8169S,	"RTL8169S" },

	{ 0, NULL }
};

static inline void
re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
{
	d->rl_bufaddr_lo = htole32((uint32_t)addr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
	else
		d->rl_bufaddr_hi = 0;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i)
			EE_SET(RL_EE_DATAIN);
		else
			EE_CLR(RL_EE_DATAIN);
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}
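
/*
 * Note on the command format: the word shifted out above is the read
 * opcode (start bit included) in the top bits with the target address
 * below it, sent MSB first; rl_eewidth selects between the 93C46 and
 * 93C56 address widths, which is why the first bit strobed out is
 * 1 << (rl_eewidth + 3).
 */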

/*
 * Read a word of data stored in the EEPROM at address 'addr'.
 */
void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int i;
	u_int16_t word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}

int
re_gmii_readreg(struct device *self, int phy, int reg)
{
	struct rl_softc *sc = (struct rl_softc *)self;
	u_int32_t rval;
	int i;

	if (phy != 7)
		return (0);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		printf("%s: PHY read failed\n", sc->sc_dev.dv_xname);
		return (0);
	}

	DELAY(20);

	return (rval & RL_PHYAR_PHYDATA);
}

void
re_gmii_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc *sc = (struct rl_softc *)dev;
	u_int32_t rval;
	int i;

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT)
		printf("%s: PHY write failed\n", sc->sc_dev.dv_xname);

	DELAY(20);
}
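
/*
 * A note on the RL_PHYAR handshake used above: the MII register number
 * is placed in bits 20:16 and the data in bits 15:0.  The BUSY flag has
 * opposite senses for the two directions: on a read the chip sets it
 * once the data bits are valid, while on a write the driver sets it and
 * the chip clears it when the cycle completes, which is why the two
 * poll loops test the bit in opposite ways.
 */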
455 */ 456 case RL_MEDIASTAT: 457 rval = CSR_READ_1(sc, RL_MEDIASTAT); 458 splx(s); 459 return (rval); 460 default: 461 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 462 splx(s); 463 return (0); 464 } 465 rval = CSR_READ_2(sc, re8139_reg); 466 if (re8139_reg == RL_BMCR) { 467 /* 8139C+ has different bit layout. */ 468 rval &= ~(BMCR_LOOP | BMCR_ISO); 469 } 470 splx(s); 471 return (rval); 472 } 473 474 void 475 re_miibus_writereg(struct device *dev, int phy, int reg, int data) 476 { 477 struct rl_softc *sc = (struct rl_softc *)dev; 478 u_int16_t re8139_reg = 0; 479 int s; 480 481 s = splnet(); 482 483 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 484 re_gmii_writereg(dev, phy, reg, data); 485 splx(s); 486 return; 487 } 488 489 /* Pretend the internal PHY is only at address 0 */ 490 if (phy) { 491 splx(s); 492 return; 493 } 494 switch(reg) { 495 case MII_BMCR: 496 re8139_reg = RL_BMCR; 497 /* 8139C+ has different bit layout. */ 498 data &= ~(BMCR_LOOP | BMCR_ISO); 499 break; 500 case MII_BMSR: 501 re8139_reg = RL_BMSR; 502 break; 503 case MII_ANAR: 504 re8139_reg = RL_ANAR; 505 break; 506 case MII_ANER: 507 re8139_reg = RL_ANER; 508 break; 509 case MII_ANLPAR: 510 re8139_reg = RL_LPAR; 511 break; 512 case MII_PHYIDR1: 513 case MII_PHYIDR2: 514 splx(s); 515 return; 516 break; 517 default: 518 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 519 splx(s); 520 return; 521 } 522 CSR_WRITE_2(sc, re8139_reg, data); 523 splx(s); 524 } 525 526 void 527 re_miibus_statchg(struct device *dev) 528 { 529 } 530 531 void 532 re_iff(struct rl_softc *sc) 533 { 534 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 535 int h = 0; 536 u_int32_t hashes[2]; 537 u_int32_t rxfilt; 538 struct arpcom *ac = &sc->sc_arpcom; 539 struct ether_multi *enm; 540 struct ether_multistep step; 541 542 rxfilt = CSR_READ_4(sc, RL_RXCFG); 543 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | 544 RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI); 545 ifp->if_flags &= ~IFF_ALLMULTI; 546 547 /* 548 * Always accept frames destined to our station address. 549 * Always accept broadcast frames. 550 */ 551 rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; 552 553 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 554 ifp->if_flags |= IFF_ALLMULTI; 555 rxfilt |= RL_RXCFG_RX_MULTI; 556 if (ifp->if_flags & IFF_PROMISC) 557 rxfilt |= RL_RXCFG_RX_ALLPHYS; 558 hashes[0] = hashes[1] = 0xFFFFFFFF; 559 } else { 560 rxfilt |= RL_RXCFG_RX_MULTI; 561 /* Program new filter. */ 562 bzero(hashes, sizeof(hashes)); 563 564 ETHER_FIRST_MULTI(step, ac, enm); 565 while (enm != NULL) { 566 h = ether_crc32_be(enm->enm_addrlo, 567 ETHER_ADDR_LEN) >> 26; 568 569 if (h < 32) 570 hashes[0] |= (1 << h); 571 else 572 hashes[1] |= (1 << (h - 32)); 573 574 ETHER_NEXT_MULTI(step, enm); 575 } 576 } 577 578 /* 579 * For some unfathomable reason, RealTek decided to reverse 580 * the order of the multicast hash registers in the PCI Express 581 * parts. This means we have to write the hash pattern in reverse 582 * order for those devices. 
583 */ 584 if (sc->rl_flags & RL_FLAG_INVMAR) { 585 CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1])); 586 CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0])); 587 } else { 588 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 589 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 590 } 591 592 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 593 } 594 595 void 596 re_reset(struct rl_softc *sc) 597 { 598 int i; 599 600 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 601 602 for (i = 0; i < RL_TIMEOUT; i++) { 603 DELAY(10); 604 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 605 break; 606 } 607 if (i == RL_TIMEOUT) 608 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 609 610 if (sc->rl_flags & RL_FLAG_MACLDPS) 611 CSR_WRITE_1(sc, RL_LDPS, 1); 612 } 613 614 #ifdef __armish__ 615 /* 616 * Thecus N2100 doesn't store the full mac address in eeprom 617 * so we read the old mac address from the device before the reset 618 * in hopes that the proper mac address is already there. 619 */ 620 union { 621 u_int32_t eaddr_word[2]; 622 u_char eaddr[ETHER_ADDR_LEN]; 623 } boot_eaddr; 624 int boot_eaddr_valid; 625 #endif /* __armish__ */ 626 /* 627 * Attach the interface. Allocate softc structures, do ifmedia 628 * setup and ethernet/BPF attach. 629 */ 630 int 631 re_attach(struct rl_softc *sc, const char *intrstr) 632 { 633 u_char eaddr[ETHER_ADDR_LEN]; 634 u_int16_t as[ETHER_ADDR_LEN / 2]; 635 struct ifnet *ifp; 636 u_int16_t re_did = 0; 637 int error = 0, i; 638 const struct re_revision *rr; 639 const char *re_name = NULL; 640 641 sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 642 643 switch (sc->sc_hwrev) { 644 case RL_HWREV_8139CPLUS: 645 sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_AUTOPAD; 646 break; 647 case RL_HWREV_8100E_SPIN1: 648 case RL_HWREV_8100E_SPIN2: 649 case RL_HWREV_8101E: 650 sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR | 651 RL_FLAG_PHYWAKE; 652 break; 653 case RL_HWREV_8103E: 654 sc->rl_flags |= RL_FLAG_MACSLEEP; 655 /* FALLTHROUGH */ 656 case RL_HWREV_8102E: 657 case RL_HWREV_8102EL: 658 case RL_HWREV_8102EL_SPIN1: 659 sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR | 660 RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 661 RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 662 break; 663 case RL_HWREV_8401E: 664 case RL_HWREV_8402: 665 case RL_HWREV_8105E: 666 case RL_HWREV_8105E_SPIN1: 667 case RL_HWREV_8106E: 668 case RL_HWREV_8106E_SPIN1: 669 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 670 RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | 671 RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | 672 RL_FLAG_NOJUMBO; 673 break; 674 case RL_HWREV_8168_SPIN1: 675 case RL_HWREV_8168_SPIN2: 676 case RL_HWREV_8168_SPIN3: 677 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 678 RL_FLAG_MACSTAT | RL_FLAG_HWIM; 679 break; 680 case RL_HWREV_8168C_SPIN2: 681 sc->rl_flags |= RL_FLAG_MACSLEEP; 682 /* FALLTHROUGH */ 683 case RL_HWREV_8168C: 684 case RL_HWREV_8168CP: 685 case RL_HWREV_8168DP: 686 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 687 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 688 RL_FLAG_HWIM | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 689 /* 690 * These controllers support jumbo frame but it seems 691 * that enabling it requires touching additional magic 692 * registers. Depending on MAC revisions some 693 * controllers need to disable checksum offload. So 694 * disable jumbo frame until I have better idea what 695 * it really requires to make it support. 696 * RTL8168C/CP : supports up to 6KB jumbo frame. 697 * RTL8111C/CP : supports up to 9KB jumbo frame. 
698 */ 699 sc->rl_flags |= RL_FLAG_NOJUMBO; 700 break; 701 case RL_HWREV_8168D: 702 case RL_HWREV_8168E: 703 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 704 RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | 705 RL_FLAG_MACSTAT | RL_FLAG_HWIM | RL_FLAG_CMDSTOP | 706 RL_FLAG_AUTOPAD | RL_FLAG_NOJUMBO; 707 break; 708 case RL_HWREV_8168E_VL: 709 case RL_HWREV_8168F: 710 sc->rl_flags |= RL_FLAG_EARLYOFF; 711 /* FALLTHROUGH */ 712 case RL_HWREV_8411: 713 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 714 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 715 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_NOJUMBO; 716 break; 717 case RL_HWREV_8168G: 718 case RL_HWREV_8168G_SPIN1: 719 case RL_HWREV_8168G_SPIN4: 720 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 721 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 722 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_NOJUMBO | 723 RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 724 break; 725 case RL_HWREV_8168G_SPIN2: 726 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 727 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 728 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_NOJUMBO | 729 RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 730 break; 731 case RL_HWREV_8169_8110SB: 732 case RL_HWREV_8169_8110SBL: 733 case RL_HWREV_8169_8110SCd: 734 case RL_HWREV_8169_8110SCe: 735 sc->rl_flags |= RL_FLAG_PHYWAKE; 736 /* FALLTHROUGH */ 737 case RL_HWREV_8169: 738 case RL_HWREV_8169S: 739 case RL_HWREV_8110S: 740 sc->rl_flags |= RL_FLAG_MACLDPS; 741 break; 742 default: 743 break; 744 } 745 746 /* Reset the adapter. */ 747 re_reset(sc); 748 749 sc->rl_tx_time = 5; /* 125us */ 750 sc->rl_rx_time = 2; /* 50us */ 751 if (sc->rl_flags & RL_FLAG_PCIE) 752 sc->rl_sim_time = 75; /* 75us */ 753 else 754 sc->rl_sim_time = 125; /* 125us */ 755 sc->rl_imtype = RL_IMTYPE_SIM; /* simulated interrupt moderation */ 756 757 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 758 sc->rl_bus_speed = 33; /* XXX */ 759 else if (sc->rl_flags & RL_FLAG_PCIE) 760 sc->rl_bus_speed = 125; 761 else { 762 u_int8_t cfg2; 763 764 cfg2 = CSR_READ_1(sc, RL_CFG2); 765 switch (cfg2 & RL_CFG2_PCI_MASK) { 766 case RL_CFG2_PCI_33MHZ: 767 sc->rl_bus_speed = 33; 768 break; 769 case RL_CFG2_PCI_66MHZ: 770 sc->rl_bus_speed = 66; 771 break; 772 default: 773 printf("%s: unknown bus speed, assume 33MHz\n", 774 sc->sc_dev.dv_xname); 775 sc->rl_bus_speed = 33; 776 break; 777 } 778 779 if (cfg2 & RL_CFG2_PCI_64BIT) 780 sc->rl_flags |= RL_FLAG_PCI64; 781 } 782 783 re_config_imtype(sc, sc->rl_imtype); 784 785 if (sc->rl_flags & RL_FLAG_PAR) { 786 /* 787 * XXX Should have a better way to extract station 788 * address from EEPROM. 789 */ 790 for (i = 0; i < ETHER_ADDR_LEN; i++) 791 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 792 } else { 793 sc->rl_eewidth = RL_9356_ADDR_LEN; 794 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 795 if (re_did != 0x8129) 796 sc->rl_eewidth = RL_9346_ADDR_LEN; 797 798 /* 799 * Get station address from the EEPROM. 800 */ 801 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 802 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 803 as[i] = letoh16(as[i]); 804 bcopy(as, eaddr, sizeof(eaddr)); 805 806 #ifdef __armish__ 807 /* 808 * On the Thecus N2100, the MAC address in the EEPROM is 809 * always 00:14:fd:10:00:00. The proper MAC address is 810 * stored in flash. Fortunately RedBoot configures the 811 * proper MAC address (for the first onboard interface) 812 * which we can read from the IDR. 
813 */ 814 if (eaddr[0] == 0x00 && eaddr[1] == 0x14 && 815 eaddr[2] == 0xfd && eaddr[3] == 0x10 && 816 eaddr[4] == 0x00 && eaddr[5] == 0x00) { 817 if (boot_eaddr_valid == 0) { 818 boot_eaddr.eaddr_word[1] = 819 letoh32(CSR_READ_4(sc, RL_IDR4)); 820 boot_eaddr.eaddr_word[0] = 821 letoh32(CSR_READ_4(sc, RL_IDR0)); 822 boot_eaddr_valid = 1; 823 } 824 825 bcopy(boot_eaddr.eaddr, eaddr, sizeof(eaddr)); 826 eaddr[5] += sc->sc_dev.dv_unit; 827 } 828 #endif 829 } 830 831 /* 832 * Set RX length mask, TX poll request register 833 * and TX descriptor count. 834 */ 835 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) { 836 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 837 sc->rl_txstart = RL_TXSTART; 838 sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8139; 839 } else { 840 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 841 sc->rl_txstart = RL_GTXSTART; 842 sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8169; 843 } 844 845 bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 846 847 for (rr = re_revisions; rr->re_name != NULL; rr++) { 848 if (rr->re_chipid == sc->sc_hwrev) 849 re_name = rr->re_name; 850 } 851 852 if (re_name == NULL) 853 printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16); 854 else 855 printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16); 856 857 printf(", %s, address %s\n", intrstr, 858 ether_sprintf(sc->sc_arpcom.ac_enaddr)); 859 860 if (sc->rl_ldata.rl_tx_desc_cnt > 861 PAGE_SIZE / sizeof(struct rl_desc)) { 862 sc->rl_ldata.rl_tx_desc_cnt = 863 PAGE_SIZE / sizeof(struct rl_desc); 864 } 865 866 /* Allocate DMA'able memory for the TX ring */ 867 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc), 868 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1, 869 &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT | 870 BUS_DMA_ZERO)) != 0) { 871 printf("%s: can't allocate tx listseg, error = %d\n", 872 sc->sc_dev.dv_xname, error); 873 goto fail_0; 874 } 875 876 /* Load the map for the TX ring. */ 877 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg, 878 sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc), 879 (caddr_t *)&sc->rl_ldata.rl_tx_list, 880 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 881 printf("%s: can't map tx list, error = %d\n", 882 sc->sc_dev.dv_xname, error); 883 goto fail_1; 884 } 885 886 if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1, 887 RL_TX_LIST_SZ(sc), 0, 0, 888 &sc->rl_ldata.rl_tx_list_map)) != 0) { 889 printf("%s: can't create tx list map, error = %d\n", 890 sc->sc_dev.dv_xname, error); 891 goto fail_2; 892 } 893 894 if ((error = bus_dmamap_load(sc->sc_dmat, 895 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 896 RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { 897 printf("%s: can't load tx list, error = %d\n", 898 sc->sc_dev.dv_xname, error); 899 goto fail_3; 900 } 901 902 /* Create DMA maps for TX buffers */ 903 for (i = 0; i < RL_TX_QLEN; i++) { 904 error = bus_dmamap_create(sc->sc_dmat, 905 RL_JUMBO_FRAMELEN, RL_NTXSEGS, RL_JUMBO_FRAMELEN, 906 0, 0, &sc->rl_ldata.rl_txq[i].txq_dmamap); 907 if (error) { 908 printf("%s: can't create DMA map for TX\n", 909 sc->sc_dev.dv_xname); 910 goto fail_4; 911 } 912 } 913 914 /* Allocate DMA'able memory for the RX ring */ 915 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ, 916 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1, 917 &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT | 918 BUS_DMA_ZERO)) != 0) { 919 printf("%s: can't allocate rx listnseg, error = %d\n", 920 sc->sc_dev.dv_xname, error); 921 goto fail_4; 922 } 923 924 /* Load the map for the RX ring. 

	/* Allocate DMA'able memory for the RX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ,
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
	    &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO)) != 0) {
		printf("%s: can't allocate rx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/* Load the map for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
	    sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ,
	    (caddr_t *)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_5;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ, 1,
	    RL_RX_DMAMEM_SZ, 0, 0,
	    &sc->rl_ldata.rl_rx_list_map)) != 0) {
		printf("%s: can't create rx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    RL_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, 0, &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			goto fail_8;
		}
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	ifp->if_watchdog = re_watchdog;
	if ((sc->rl_flags & RL_FLAG_NOJUMBO) == 0)
		ifp->if_hardmtu = RL_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, RL_TX_QLEN);
	IFQ_SET_READY(&ifp->if_snd);

	m_clsetwms(ifp, MCLBYTES, 2, RL_RX_DESC_CNT);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;

	/*
	 * RTL8168/8111C generates wrong IP checksummed frame if the
	 * packet has IP options so disable TX IP checksum offloading.
	 */
	switch (sc->sc_hwrev) {
	case RL_HWREV_8168C:
	case RL_HWREV_8168C_SPIN2:
	case RL_HWREV_8168CP:
		break;
	default:
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = re_wol;
	re_wol(ifp, 0);
#endif
	timeout_set(&sc->timer_handle, re_tick, sc);

	/* Take PHY out of power down mode. */
	if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (sc->sc_hwrev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if (sc->rl_flags & RL_FLAG_PHYWAKE) {
		re_gmii_writereg((struct device *)sc, 1, 0x1f, 0);
		re_gmii_writereg((struct device *)sc, 1, 0x0e, 0);
	}

	/* Do MII setup */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = re_miibus_readreg;
	sc->sc_mii.mii_writereg = re_miibus_writereg;
	sc->sc_mii.mii_statchg = re_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
	    re_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	re_reset(sc);
	if_attach(ifp);
	ether_ifattach(ifp);

	return (0);

fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ);
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}

int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf *m;
	bus_dmamap_t map;
	struct rl_desc *d;
	struct rl_rxsoft *rxs;
	u_int32_t cmdstat;
	int error, idx;

	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
	if (!m)
		return (ENOBUFS);
1101 */ 1102 m->m_len = m->m_pkthdr.len = RE_RX_DESC_BUFLEN; 1103 m->m_data += RE_ETHER_ALIGN; 1104 1105 idx = sc->rl_ldata.rl_rx_prodidx; 1106 rxs = &sc->rl_ldata.rl_rxsoft[idx]; 1107 map = rxs->rxs_dmamap; 1108 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1109 BUS_DMA_READ|BUS_DMA_NOWAIT); 1110 if (error) { 1111 m_freem(m); 1112 return (ENOBUFS); 1113 } 1114 1115 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1116 BUS_DMASYNC_PREREAD); 1117 1118 d = &sc->rl_ldata.rl_rx_list[idx]; 1119 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1120 cmdstat = letoh32(d->rl_cmdstat); 1121 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1122 if (cmdstat & RL_RDESC_STAT_OWN) { 1123 printf("%s: tried to map busy RX descriptor\n", 1124 sc->sc_dev.dv_xname); 1125 m_freem(m); 1126 return (ENOBUFS); 1127 } 1128 1129 rxs->rxs_mbuf = m; 1130 1131 d->rl_vlanctl = 0; 1132 cmdstat = map->dm_segs[0].ds_len; 1133 if (idx == (RL_RX_DESC_CNT - 1)) 1134 cmdstat |= RL_RDESC_CMD_EOR; 1135 re_set_bufaddr(d, map->dm_segs[0].ds_addr); 1136 d->rl_cmdstat = htole32(cmdstat); 1137 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1138 cmdstat |= RL_RDESC_CMD_OWN; 1139 d->rl_cmdstat = htole32(cmdstat); 1140 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1141 1142 sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx); 1143 sc->rl_ldata.rl_rx_cnt++; 1144 1145 return (0); 1146 } 1147 1148 1149 int 1150 re_tx_list_init(struct rl_softc *sc) 1151 { 1152 int i; 1153 1154 memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc)); 1155 for (i = 0; i < RL_TX_QLEN; i++) { 1156 sc->rl_ldata.rl_txq[i].txq_mbuf = NULL; 1157 } 1158 1159 bus_dmamap_sync(sc->sc_dmat, 1160 sc->rl_ldata.rl_tx_list_map, 0, 1161 sc->rl_ldata.rl_tx_list_map->dm_mapsize, 1162 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1163 sc->rl_ldata.rl_txq_prodidx = 0; 1164 sc->rl_ldata.rl_txq_considx = 0; 1165 sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT(sc); 1166 sc->rl_ldata.rl_tx_nextfree = 0; 1167 1168 return (0); 1169 } 1170 1171 int 1172 re_rx_list_init(struct rl_softc *sc) 1173 { 1174 bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ); 1175 1176 sc->rl_ldata.rl_rx_prodidx = 0; 1177 sc->rl_ldata.rl_rx_considx = 0; 1178 sc->rl_ldata.rl_rx_cnt = 0; 1179 sc->rl_head = sc->rl_tail = NULL; 1180 1181 re_rx_list_fill(sc); 1182 1183 return (0); 1184 } 1185 1186 void 1187 re_rx_list_fill(struct rl_softc *sc) 1188 { 1189 while (sc->rl_ldata.rl_rx_cnt < RL_RX_DESC_CNT) { 1190 if (re_newbuf(sc) == ENOBUFS) 1191 break; 1192 } 1193 } 1194 1195 /* 1196 * RX handler for C+ and 8169. For the gigE chips, we support 1197 * the reception of jumbo frames that have been fragmented 1198 * across multiple 2K mbuf cluster buffers. 
1199 */ 1200 int 1201 re_rxeof(struct rl_softc *sc) 1202 { 1203 struct mbuf *m; 1204 struct ifnet *ifp; 1205 int i, total_len, rx = 0; 1206 struct rl_desc *cur_rx; 1207 struct rl_rxsoft *rxs; 1208 u_int32_t rxstat, rxvlan; 1209 1210 ifp = &sc->sc_arpcom.ac_if; 1211 1212 for (i = sc->rl_ldata.rl_rx_considx; sc->rl_ldata.rl_rx_cnt > 0; 1213 i = RL_NEXT_RX_DESC(sc, i)) { 1214 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 1215 RL_RXDESCSYNC(sc, i, 1216 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1217 rxstat = letoh32(cur_rx->rl_cmdstat); 1218 rxvlan = letoh32(cur_rx->rl_vlanctl); 1219 RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD); 1220 if ((rxstat & RL_RDESC_STAT_OWN) != 0) 1221 break; 1222 total_len = rxstat & sc->rl_rxlenmask; 1223 rxs = &sc->rl_ldata.rl_rxsoft[i]; 1224 m = rxs->rxs_mbuf; 1225 rxs->rxs_mbuf = NULL; 1226 sc->rl_ldata.rl_rx_cnt--; 1227 rx = 1; 1228 1229 /* Invalidate the RX mbuf and unload its map */ 1230 1231 bus_dmamap_sync(sc->sc_dmat, 1232 rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize, 1233 BUS_DMASYNC_POSTREAD); 1234 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1235 1236 if (!(rxstat & RL_RDESC_STAT_EOF)) { 1237 m->m_len = RE_RX_DESC_BUFLEN; 1238 if (sc->rl_head == NULL) 1239 sc->rl_head = sc->rl_tail = m; 1240 else { 1241 m->m_flags &= ~M_PKTHDR; 1242 sc->rl_tail->m_next = m; 1243 sc->rl_tail = m; 1244 } 1245 continue; 1246 } 1247 1248 /* 1249 * NOTE: for the 8139C+, the frame length field 1250 * is always 12 bits in size, but for the gigE chips, 1251 * it is 13 bits (since the max RX frame length is 16K). 1252 * Unfortunately, all 32 bits in the status word 1253 * were already used, so to make room for the extra 1254 * length bit, RealTek took out the 'frame alignment 1255 * error' bit and shifted the other status bits 1256 * over one slot. The OWN, EOR, FS and LS bits are 1257 * still in the same places. We have already extracted 1258 * the frame length and checked the OWN bit, so rather 1259 * than using an alternate bit mapping, we shift the 1260 * status bits one space to the right so we can evaluate 1261 * them using the 8169 status as though it was in the 1262 * same format as that of the 8139C+. 1263 */ 1264 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) 1265 rxstat >>= 1; 1266 1267 /* 1268 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be 1269 * set, but if CRC is clear, it will still be a valid frame. 1270 */ 1271 if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 && 1272 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) { 1273 ifp->if_ierrors++; 1274 /* 1275 * If this is part of a multi-fragment packet, 1276 * discard all the pieces. 1277 */ 1278 if (sc->rl_head != NULL) { 1279 m_freem(sc->rl_head); 1280 sc->rl_head = sc->rl_tail = NULL; 1281 } 1282 continue; 1283 } 1284 1285 if (sc->rl_head != NULL) { 1286 m->m_len = total_len % RE_RX_DESC_BUFLEN; 1287 if (m->m_len == 0) 1288 m->m_len = RE_RX_DESC_BUFLEN; 1289 /* 1290 * Special case: if there's 4 bytes or less 1291 * in this buffer, the mbuf can be discarded: 1292 * the last 4 bytes is the CRC, which we don't 1293 * care about anyway. 
1294 */ 1295 if (m->m_len <= ETHER_CRC_LEN) { 1296 sc->rl_tail->m_len -= 1297 (ETHER_CRC_LEN - m->m_len); 1298 m_freem(m); 1299 } else { 1300 m->m_len -= ETHER_CRC_LEN; 1301 m->m_flags &= ~M_PKTHDR; 1302 sc->rl_tail->m_next = m; 1303 } 1304 m = sc->rl_head; 1305 sc->rl_head = sc->rl_tail = NULL; 1306 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1307 } else 1308 m->m_pkthdr.len = m->m_len = 1309 (total_len - ETHER_CRC_LEN); 1310 1311 ifp->if_ipackets++; 1312 m->m_pkthdr.rcvif = ifp; 1313 1314 /* Do RX checksumming */ 1315 1316 if (sc->rl_flags & RL_FLAG_DESCV2) { 1317 /* Check IP header checksum */ 1318 if ((rxvlan & RL_RDESC_IPV4) && 1319 !(rxstat & RL_RDESC_STAT_IPSUMBAD)) 1320 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 1321 1322 /* Check TCP/UDP checksum */ 1323 if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) && 1324 (((rxstat & RL_RDESC_STAT_TCP) && 1325 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1326 ((rxstat & RL_RDESC_STAT_UDP) && 1327 !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))) 1328 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 1329 M_UDP_CSUM_IN_OK; 1330 } else { 1331 /* Check IP header checksum */ 1332 if ((rxstat & RL_RDESC_STAT_PROTOID) && 1333 !(rxstat & RL_RDESC_STAT_IPSUMBAD)) 1334 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 1335 1336 /* Check TCP/UDP checksum */ 1337 if ((RL_TCPPKT(rxstat) && 1338 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1339 (RL_UDPPKT(rxstat) && 1340 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) 1341 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 1342 M_UDP_CSUM_IN_OK; 1343 } 1344 #if NVLAN > 0 1345 if (rxvlan & RL_RDESC_VLANCTL_TAG) { 1346 m->m_pkthdr.ether_vtag = 1347 ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)); 1348 m->m_flags |= M_VLANTAG; 1349 } 1350 #endif 1351 1352 #if NBPFILTER > 0 1353 if (ifp->if_bpf) 1354 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN); 1355 #endif 1356 ether_input_mbuf(ifp, m); 1357 } 1358 1359 sc->rl_ldata.rl_rx_considx = i; 1360 re_rx_list_fill(sc); 1361 1362 return (rx); 1363 } 1364 1365 int 1366 re_txeof(struct rl_softc *sc) 1367 { 1368 struct ifnet *ifp; 1369 struct rl_txq *txq; 1370 uint32_t txstat; 1371 int idx, descidx, tx = 0; 1372 1373 ifp = &sc->sc_arpcom.ac_if; 1374 1375 for (idx = sc->rl_ldata.rl_txq_considx;; idx = RL_NEXT_TXQ(sc, idx)) { 1376 txq = &sc->rl_ldata.rl_txq[idx]; 1377 1378 if (txq->txq_mbuf == NULL) { 1379 KASSERT(idx == sc->rl_ldata.rl_txq_prodidx); 1380 break; 1381 } 1382 1383 descidx = txq->txq_descidx; 1384 RL_TXDESCSYNC(sc, descidx, 1385 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1386 txstat = 1387 letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat); 1388 RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD); 1389 KASSERT((txstat & RL_TDESC_CMD_EOF) != 0); 1390 if (txstat & RL_TDESC_CMD_OWN) 1391 break; 1392 1393 tx = 1; 1394 sc->rl_ldata.rl_tx_free += txq->txq_nsegs; 1395 KASSERT(sc->rl_ldata.rl_tx_free <= RL_TX_DESC_CNT(sc)); 1396 bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 1397 0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1398 bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap); 1399 m_freem(txq->txq_mbuf); 1400 txq->txq_mbuf = NULL; 1401 1402 if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT)) 1403 ifp->if_collisions++; 1404 if (txstat & RL_TDESC_STAT_TXERRSUM) 1405 ifp->if_oerrors++; 1406 else 1407 ifp->if_opackets++; 1408 } 1409 1410 sc->rl_ldata.rl_txq_considx = idx; 1411 1412 ifp->if_flags &= ~IFF_OACTIVE; 1413 1414 /* 1415 * Some chips will ignore a second TX request issued while an 1416 * existing transmission is in progress. 

	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only
	 * seems to be required with the PCIe devices.
	 */
	if (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT(sc))
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	else
		ifp->if_timer = 0;

	return (tx);
}

void
re_tick(void *xsc)
{
	struct rl_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	s = splnet();

	mii_tick(mii);
	if (sc->rl_flags & RL_FLAG_LINK) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->rl_flags &= ~RL_FLAG_LINK;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->rl_flags |= RL_FLAG_LINK;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				re_start(ifp);
		}
	}
	splx(s);

	timeout_add_sec(&sc->timer_handle, 1);
}

int
re_intr(void *arg)
{
	struct rl_softc *sc = arg;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);

	rx = tx = 0;
	status = CSR_READ_2(sc, RL_ISR);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xffff)
		return (0);
	if (status)
		CSR_WRITE_2(sc, RL_ISR, status);

	if (status & RL_ISR_TIMEOUT_EXPIRED)
		claimed = 1;

	if (status & RL_INTRS_CPLUS) {
		if (status & (sc->rl_rx_ack | RL_ISR_RX_ERR)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(ifp);
			claimed = 1;
		}

		if (status & RL_ISR_LINKCHG) {
			timeout_del(&sc->timer_handle);
			re_tick(sc);
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if ((sc->rl_flags & RL_FLAG_TIMERINTR)) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed, fall back
				 * to using TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				tx = re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	if (tx && !IFQ_IS_EMPTY(&ifp->if_snd))
		re_start(ifp);

	CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);

	return (claimed);
}
1560 */ 1561 1562 /* 1563 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading 1564 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/ 1565 * RL_TDESC_CMD_UDPCSUM does not take affect. 1566 */ 1567 1568 if ((m->m_pkthdr.csum_flags & 1569 (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) { 1570 if (sc->rl_flags & RL_FLAG_DESCV2) { 1571 vlanctl |= RL_TDESC_CMD_IPCSUMV2; 1572 if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) 1573 vlanctl |= RL_TDESC_CMD_TCPCSUMV2; 1574 if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) 1575 vlanctl |= RL_TDESC_CMD_UDPCSUMV2; 1576 } else { 1577 csum_flags |= RL_TDESC_CMD_IPCSUM; 1578 if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) 1579 csum_flags |= RL_TDESC_CMD_TCPCSUM; 1580 if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) 1581 csum_flags |= RL_TDESC_CMD_UDPCSUM; 1582 } 1583 } 1584 1585 txq = &sc->rl_ldata.rl_txq[*idx]; 1586 map = txq->txq_dmamap; 1587 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1588 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1589 if (error) { 1590 /* XXX try to defrag if EFBIG? */ 1591 printf("%s: can't map mbuf (error %d)\n", 1592 sc->sc_dev.dv_xname, error); 1593 return (error); 1594 } 1595 1596 nsegs = map->dm_nsegs; 1597 pad = 0; 1598 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0 && 1599 m->m_pkthdr.len <= RL_IP4CSUMTX_PADLEN && 1600 (csum_flags & RL_TDESC_CMD_IPCSUM) != 0) { 1601 pad = 1; 1602 nsegs++; 1603 } 1604 1605 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { 1606 error = EFBIG; 1607 goto fail_unload; 1608 } 1609 1610 /* 1611 * Make sure that the caches are synchronized before we 1612 * ask the chip to start DMA for the packet data. 1613 */ 1614 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1615 BUS_DMASYNC_PREWRITE); 1616 1617 /* 1618 * Set up hardware VLAN tagging. Note: vlan tag info must 1619 * appear in all descriptors of a multi-descriptor 1620 * transmission attempt. 1621 */ 1622 #if NVLAN > 0 1623 if (m->m_flags & M_VLANTAG) 1624 vlanctl |= swap16(m->m_pkthdr.ether_vtag) | 1625 RL_TDESC_VLANCTL_TAG; 1626 #endif 1627 1628 /* 1629 * Map the segment array into descriptors. Note that we set the 1630 * start-of-frame and end-of-frame markers for either TX or RX, but 1631 * they really only have meaning in the TX case. (In the RX case, 1632 * it's the chip that tells us where packets begin and end.) 1633 * We also keep track of the end of the ring and set the 1634 * end-of-ring bits as needed, and we set the ownership bits 1635 * in all except the very first descriptor. (The caller will 1636 * set this descriptor later when it start transmission or 1637 * reception.) 
1638 */ 1639 curidx = startidx = sc->rl_ldata.rl_tx_nextfree; 1640 lastidx = -1; 1641 for (seg = 0; seg < map->dm_nsegs; 1642 seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) { 1643 d = &sc->rl_ldata.rl_tx_list[curidx]; 1644 RL_TXDESCSYNC(sc, curidx, 1645 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1646 cmdstat = letoh32(d->rl_cmdstat); 1647 RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD); 1648 if (cmdstat & RL_TDESC_STAT_OWN) { 1649 printf("%s: tried to map busy TX descriptor\n", 1650 sc->sc_dev.dv_xname); 1651 for (; seg > 0; seg --) { 1652 uidx = (curidx + RL_TX_DESC_CNT(sc) - seg) % 1653 RL_TX_DESC_CNT(sc); 1654 sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0; 1655 RL_TXDESCSYNC(sc, uidx, 1656 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1657 } 1658 error = ENOBUFS; 1659 goto fail_unload; 1660 } 1661 1662 d->rl_vlanctl = htole32(vlanctl); 1663 re_set_bufaddr(d, map->dm_segs[seg].ds_addr); 1664 cmdstat = csum_flags | map->dm_segs[seg].ds_len; 1665 if (seg == 0) 1666 cmdstat |= RL_TDESC_CMD_SOF; 1667 else 1668 cmdstat |= RL_TDESC_CMD_OWN; 1669 if (curidx == (RL_TX_DESC_CNT(sc) - 1)) 1670 cmdstat |= RL_TDESC_CMD_EOR; 1671 if (seg == nsegs - 1) { 1672 cmdstat |= RL_TDESC_CMD_EOF; 1673 lastidx = curidx; 1674 } 1675 d->rl_cmdstat = htole32(cmdstat); 1676 RL_TXDESCSYNC(sc, curidx, 1677 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1678 } 1679 if (pad) { 1680 d = &sc->rl_ldata.rl_tx_list[curidx]; 1681 d->rl_vlanctl = htole32(vlanctl); 1682 re_set_bufaddr(d, RL_TXPADDADDR(sc)); 1683 cmdstat = csum_flags | 1684 RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF | 1685 (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len); 1686 if (curidx == (RL_TX_DESC_CNT(sc) - 1)) 1687 cmdstat |= RL_TDESC_CMD_EOR; 1688 d->rl_cmdstat = htole32(cmdstat); 1689 RL_TXDESCSYNC(sc, curidx, 1690 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1691 lastidx = curidx; 1692 curidx = RL_NEXT_TX_DESC(sc, curidx); 1693 } 1694 KASSERT(lastidx != -1); 1695 1696 /* Transfer ownership of packet to the chip. */ 1697 1698 sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |= 1699 htole32(RL_TDESC_CMD_OWN); 1700 RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1701 1702 /* update info of TX queue and descriptors */ 1703 txq->txq_mbuf = m; 1704 txq->txq_descidx = lastidx; 1705 txq->txq_nsegs = nsegs; 1706 1707 sc->rl_ldata.rl_tx_free -= nsegs; 1708 sc->rl_ldata.rl_tx_nextfree = curidx; 1709 1710 *idx = RL_NEXT_TXQ(sc, *idx); 1711 1712 return (0); 1713 1714 fail_unload: 1715 bus_dmamap_unload(sc->sc_dmat, map); 1716 1717 return (error); 1718 } 1719 1720 /* 1721 * Main transmit routine for C+ and gigE NICs. 
1722 */ 1723 1724 void 1725 re_start(struct ifnet *ifp) 1726 { 1727 struct rl_softc *sc; 1728 int idx, queued = 0; 1729 1730 sc = ifp->if_softc; 1731 1732 if (ifp->if_flags & IFF_OACTIVE) 1733 return; 1734 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 1735 return; 1736 1737 idx = sc->rl_ldata.rl_txq_prodidx; 1738 for (;;) { 1739 struct mbuf *m; 1740 int error; 1741 1742 IFQ_POLL(&ifp->if_snd, m); 1743 if (m == NULL) 1744 break; 1745 1746 if (sc->rl_ldata.rl_txq[idx].txq_mbuf != NULL) { 1747 KASSERT(idx == sc->rl_ldata.rl_txq_considx); 1748 ifp->if_flags |= IFF_OACTIVE; 1749 break; 1750 } 1751 1752 error = re_encap(sc, m, &idx); 1753 if (error == EFBIG && 1754 sc->rl_ldata.rl_tx_free == RL_TX_DESC_CNT(sc)) { 1755 IFQ_DEQUEUE(&ifp->if_snd, m); 1756 m_freem(m); 1757 ifp->if_oerrors++; 1758 continue; 1759 } 1760 if (error) { 1761 ifp->if_flags |= IFF_OACTIVE; 1762 break; 1763 } 1764 1765 IFQ_DEQUEUE(&ifp->if_snd, m); 1766 queued++; 1767 1768 #if NBPFILTER > 0 1769 /* 1770 * If there's a BPF listener, bounce a copy of this frame 1771 * to him. 1772 */ 1773 if (ifp->if_bpf) 1774 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1775 #endif 1776 } 1777 1778 if (queued == 0) 1779 return; 1780 1781 sc->rl_ldata.rl_txq_prodidx = idx; 1782 1783 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 1784 1785 /* 1786 * Set a timeout in case the chip goes out to lunch. 1787 */ 1788 ifp->if_timer = 5; 1789 } 1790 1791 int 1792 re_init(struct ifnet *ifp) 1793 { 1794 struct rl_softc *sc = ifp->if_softc; 1795 u_int16_t cfg; 1796 uint32_t rxcfg; 1797 int s; 1798 union { 1799 u_int32_t align_dummy; 1800 u_char eaddr[ETHER_ADDR_LEN]; 1801 } eaddr; 1802 1803 s = splnet(); 1804 1805 /* 1806 * Cancel pending I/O and free all RX/TX buffers. 1807 */ 1808 re_stop(ifp); 1809 1810 /* 1811 * Enable C+ RX and TX mode, as well as VLAN stripping and 1812 * RX checksum offload. We must configure the C+ register 1813 * before all others. 1814 */ 1815 cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW | 1816 RL_CPLUSCMD_RXCSUM_ENB; 1817 1818 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1819 cfg |= RL_CPLUSCMD_VLANSTRIP; 1820 1821 if (sc->rl_flags & RL_FLAG_MACSTAT) 1822 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 1823 else 1824 cfg |= RL_CPLUSCMD_RXENB; 1825 1826 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 1827 1828 /* 1829 * Init our MAC address. Even though the chipset 1830 * documentation doesn't mention it, we need to enter "Config 1831 * register write enable" mode to modify the ID registers. 1832 */ 1833 bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN); 1834 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 1835 CSR_WRITE_4(sc, RL_IDR4, 1836 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 1837 CSR_WRITE_4(sc, RL_IDR0, 1838 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 1839 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1840 1841 /* 1842 * For C+ mode, initialize the RX descriptors and mbufs. 1843 */ 1844 re_rx_list_init(sc); 1845 re_tx_list_init(sc); 1846 1847 /* 1848 * Load the addresses of the RX and TX lists into the chip. 
1849 */ 1850 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 1851 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1852 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 1853 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1854 1855 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 1856 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1857 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 1858 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1859 1860 if (sc->rl_flags & RL_FLAG_RXDV_GATED) 1861 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & 1862 ~0x00080000); 1863 1864 /* 1865 * Enable transmit and receive. 1866 */ 1867 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 1868 1869 /* 1870 * Set the initial TX and RX configuration. 1871 */ 1872 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 1873 1874 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 1875 1876 rxcfg = RL_RXCFG_CONFIG; 1877 if (sc->rl_flags & RL_FLAG_EARLYOFF) 1878 rxcfg |= RL_RXCFG_EARLYOFF; 1879 else if (sc->rl_flags & RL_FLAG_EARLYOFFV2) 1880 rxcfg |= RL_RXCFG_EARLYOFFV2; 1881 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 1882 1883 /* Program promiscuous mode and multicast filters. */ 1884 re_iff(sc); 1885 1886 /* 1887 * Enable interrupts. 1888 */ 1889 re_setup_intr(sc, 1, sc->rl_imtype); 1890 CSR_WRITE_2(sc, RL_ISR, sc->rl_imtype); 1891 1892 /* Start RX/TX process. */ 1893 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 1894 #ifdef notdef 1895 /* Enable receiver and transmitter. */ 1896 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 1897 #endif 1898 1899 /* 1900 * For 8169 gigE NICs, set the max allowed RX packet 1901 * size so we can receive jumbo frames. 1902 */ 1903 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) 1904 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 1905 1906 mii_mediachg(&sc->sc_mii); 1907 1908 CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD); 1909 1910 ifp->if_flags |= IFF_RUNNING; 1911 ifp->if_flags &= ~IFF_OACTIVE; 1912 1913 splx(s); 1914 1915 sc->rl_flags &= ~RL_FLAG_LINK; 1916 1917 timeout_add_sec(&sc->timer_handle, 1); 1918 1919 return (0); 1920 } 1921 1922 /* 1923 * Set media options. 1924 */ 1925 int 1926 re_ifmedia_upd(struct ifnet *ifp) 1927 { 1928 struct rl_softc *sc; 1929 1930 sc = ifp->if_softc; 1931 1932 return (mii_mediachg(&sc->sc_mii)); 1933 } 1934 1935 /* 1936 * Report current media status. 
1937 */ 1938 void 1939 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1940 { 1941 struct rl_softc *sc; 1942 1943 sc = ifp->if_softc; 1944 1945 mii_pollstat(&sc->sc_mii); 1946 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1947 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1948 } 1949 1950 int 1951 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1952 { 1953 struct rl_softc *sc = ifp->if_softc; 1954 struct ifreq *ifr = (struct ifreq *) data; 1955 struct ifaddr *ifa = (struct ifaddr *)data; 1956 int s, error = 0; 1957 1958 s = splnet(); 1959 1960 switch(command) { 1961 case SIOCSIFADDR: 1962 ifp->if_flags |= IFF_UP; 1963 if (!(ifp->if_flags & IFF_RUNNING)) 1964 re_init(ifp); 1965 #ifdef INET 1966 if (ifa->ifa_addr->sa_family == AF_INET) 1967 arp_ifinit(&sc->sc_arpcom, ifa); 1968 #endif /* INET */ 1969 break; 1970 case SIOCSIFFLAGS: 1971 if (ifp->if_flags & IFF_UP) { 1972 if (ifp->if_flags & IFF_RUNNING) 1973 error = ENETRESET; 1974 else 1975 re_init(ifp); 1976 } else { 1977 if (ifp->if_flags & IFF_RUNNING) 1978 re_stop(ifp); 1979 } 1980 break; 1981 case SIOCGIFMEDIA: 1982 case SIOCSIFMEDIA: 1983 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1984 break; 1985 default: 1986 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data); 1987 } 1988 1989 if (error == ENETRESET) { 1990 if (ifp->if_flags & IFF_RUNNING) 1991 re_iff(sc); 1992 error = 0; 1993 } 1994 1995 splx(s); 1996 return (error); 1997 } 1998 1999 void 2000 re_watchdog(struct ifnet *ifp) 2001 { 2002 struct rl_softc *sc; 2003 int s; 2004 2005 sc = ifp->if_softc; 2006 s = splnet(); 2007 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 2008 ifp->if_oerrors++; 2009 2010 re_txeof(sc); 2011 re_rxeof(sc); 2012 2013 re_init(ifp); 2014 2015 splx(s); 2016 } 2017 2018 /* 2019 * Stop the adapter and free any mbufs allocated to the 2020 * RX and TX lists. 2021 */ 2022 void 2023 re_stop(struct ifnet *ifp) 2024 { 2025 struct rl_softc *sc; 2026 int i; 2027 2028 sc = ifp->if_softc; 2029 2030 ifp->if_timer = 0; 2031 sc->rl_flags &= ~(RL_FLAG_LINK|RL_FLAG_TIMERINTR); 2032 2033 timeout_del(&sc->timer_handle); 2034 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2035 2036 mii_down(&sc->sc_mii); 2037 2038 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2039 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2040 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 2041 2042 if (sc->rl_head != NULL) { 2043 m_freem(sc->rl_head); 2044 sc->rl_head = sc->rl_tail = NULL; 2045 } 2046 2047 /* Free the TX list buffers. */ 2048 for (i = 0; i < RL_TX_QLEN; i++) { 2049 if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) { 2050 bus_dmamap_unload(sc->sc_dmat, 2051 sc->rl_ldata.rl_txq[i].txq_dmamap); 2052 m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf); 2053 sc->rl_ldata.rl_txq[i].txq_mbuf = NULL; 2054 } 2055 } 2056 2057 /* Free the RX list buffers. */ 2058 for (i = 0; i < RL_RX_DESC_CNT; i++) { 2059 if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) { 2060 bus_dmamap_unload(sc->sc_dmat, 2061 sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 2062 m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf); 2063 sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL; 2064 } 2065 } 2066 } 2067 2068 void 2069 re_setup_hw_im(struct rl_softc *sc) 2070 { 2071 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2072 2073 /* 2074 * Interrupt moderation 2075 * 2076 * 0xABCD 2077 * A - unknown (maybe TX related) 2078 * B - TX timer (unit: 25us) 2079 * C - unknown (maybe RX related) 2080 * D - RX timer (unit: 25us) 2081 * 2082 * 2083 * re(4)'s interrupt moderation is actually controlled by 2084 * two variables, like most other NICs (bge, bnx etc.) 

void
re_disable_hw_im(struct rl_softc *sc)
{
	if (sc->rl_flags & RL_FLAG_HWIM)
		CSR_WRITE_2(sc, RL_IM, 0);
}

void
re_setup_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */
	else {
		u_int32_t ticks;

		/*
		 * Datasheet says tick decreases at bus speed,
		 * but it seems the clock runs a little bit
		 * faster, so we do some compensation here.
		 */
		ticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5;
		CSR_WRITE_4(sc, RL_TIMERINT_8169, ticks);
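
		/*
		 * Worked example: if a tick really were one bus
		 * clock, rl_sim_time us would take sim_time *
		 * bus_speed ticks (125us * 33MHz = 4125); the 8/5
		 * factor programs 60% more (6600 ticks here) to
		 * compensate for the apparently faster timer clock.
		 */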
	}
	CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
	sc->rl_flags |= RL_FLAG_TIMERINTR;
}

void
re_disable_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0);
	else
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0);
	sc->rl_flags &= ~RL_FLAG_TIMERINTR;
}

void
re_config_imtype(struct rl_softc *sc, int imtype)
{
	switch (imtype) {
	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		/* FALLTHROUGH */
	case RL_IMTYPE_NONE:
		sc->rl_intrs = RL_INTRS_CPLUS;
		sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW |
		    RL_ISR_RX_OVERRUN;
		sc->rl_tx_ack = RL_ISR_TX_OK;
		break;

	case RL_IMTYPE_SIM:
		sc->rl_intrs = RL_INTRS_TIMER;
		sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED;
		sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED;
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}

void
re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype)
{
	re_config_imtype(sc, imtype);

	if (enable_intrs)
		CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
	else
		CSR_WRITE_2(sc, RL_IMR, 0);

	switch (imtype) {
	case RL_IMTYPE_NONE:
		re_disable_sim_im(sc);
		re_disable_hw_im(sc);
		break;

	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		re_disable_sim_im(sc);
		re_setup_hw_im(sc);
		break;

	case RL_IMTYPE_SIM:
		re_disable_hw_im(sc);
		re_setup_sim_im(sc);
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}

#ifndef SMALL_KERNEL
int
re_wol(struct ifnet *ifp, int enable)
{
	struct rl_softc *sc = ifp->if_softc;
	int i;
	u_int8_t val;
	struct re_wolcfg {
		u_int8_t	enable;
		u_int8_t	reg;
		u_int8_t	bit;
	} re_wolcfg[] = {
		/* Always disable all wake events except magic packet. */
		{ 0, RL_CFG5, RL_CFG5_WOL_UCAST },
		{ 0, RL_CFG5, RL_CFG5_WOL_MCAST },
		{ 0, RL_CFG5, RL_CFG5_WOL_BCAST },
		{ 1, RL_CFG3, RL_CFG3_WOL_MAGIC },
		{ 0, RL_CFG3, RL_CFG3_WOL_LINK }
	};

	if (enable) {
		if ((CSR_READ_1(sc, RL_CFG1) & RL_CFG1_PME) == 0) {
			printf("%s: power management is disabled, "
			    "cannot do WOL\n", sc->sc_dev.dv_xname);
			return (ENOTSUP);
		}
		if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_AUXPWR) == 0)
			printf("%s: no auxiliary power, cannot do WOL from D3 "
			    "(power-off) state\n", sc->sc_dev.dv_xname);
	}

	/* Temporarily enable write to configuration registers. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);

	for (i = 0; i < nitems(re_wolcfg); i++) {
		val = CSR_READ_1(sc, re_wolcfg[i].reg);
		if (enable && re_wolcfg[i].enable)
			val |= re_wolcfg[i].bit;
		else
			val &= ~re_wolcfg[i].bit;
		CSR_WRITE_1(sc, re_wolcfg[i].reg, val);
	}

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	return (0);
}
#endif