/*	$OpenBSD: if_nge.c,v 1.98 2023/11/10 15:51:20 bluhm Exp $	*/
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_nge.c,v 1.35 2002/08/08 18:33:28 ambrisko Exp $
 */

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD. Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante. Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation. The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software. This affects the size of certain fields in the DMA
 * descriptors.
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size. If you have
 *   TX checksum offload enabled and you attempt to transmit a
 *   frame larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>	/* for vtophys */
#define	VTOPHYS(v)	vtophys((vaddr_t)(v))

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#define NGE_USEIOSPACE

#include <dev/pci/if_ngereg.h>

int	nge_probe(struct device *, void *, void *);
void	nge_attach(struct device *, struct device *, void *);

int	nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *);
int	nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
void	nge_rxeof(struct nge_softc *);
void	nge_txeof(struct nge_softc *);
int	nge_intr(void *);
void	nge_tick(void *);
void	nge_start(struct ifnet *);
int	nge_ioctl(struct ifnet *, u_long, caddr_t);
void	nge_init(void *);
void	nge_stop(struct nge_softc *);
void	nge_watchdog(struct ifnet *);
int	nge_ifmedia_mii_upd(struct ifnet *);
void	nge_ifmedia_mii_sts(struct ifnet *, struct ifmediareq *);
int	nge_ifmedia_tbi_upd(struct ifnet *);
void	nge_ifmedia_tbi_sts(struct ifnet *, struct ifmediareq *);

void	nge_delay(struct nge_softc *);
void	nge_eeprom_idle(struct nge_softc *);
void	nge_eeprom_putbyte(struct nge_softc *, int);
void	nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
void	nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);

void	nge_mii_sync(struct nge_softc *);
void	nge_mii_send(struct nge_softc *, u_int32_t, int);
int	nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
int	nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);

int	nge_miibus_readreg(struct device *, int, int);
void	nge_miibus_writereg(struct device *, int, int, int);
void	nge_miibus_statchg(struct device *);

void	nge_setmulti(struct nge_softc *);
void	nge_reset(struct nge_softc *);
int	nge_list_rx_init(struct nge_softc *);
int	nge_list_tx_init(struct nge_softc *);

#ifdef NGE_USEIOSPACE
#define NGE_RES			SYS_RES_IOPORT
#define NGE_RID			NGE_PCI_LOIO
#else
#define NGE_RES			SYS_RES_MEMORY
#define NGE_RID			NGE_PCI_LOMEM
#endif

#ifdef NGE_DEBUG
#define DPRINTF(x)	if (ngedebug) printf x
#define DPRINTFN(n,x)	if (ngedebug >= (n)) printf x
int	ngedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define NGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define NGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))

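/*
 * Kill time by issuing dummy reads of the CSR register.  The loop
 * bound of (300 / 33) + 1 presumably targets a delay of roughly 300
 * PCI clocks, on the theory that each read costs about 33 clocks;
 * treat the figure as an approximation, not a datasheet guarantee.
 */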
void
nge_delay(struct nge_softc *sc)
{
	int	idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, NGE_CSR);
}

void
nge_eeprom_idle(struct nge_softc *sc)
{
	int	i;

	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CLK);
	nge_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
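/*
 * The opcode and word address are clocked out MSB first, 11 bits in
 * all (hence the 0x400 mask below).  This matches the 93C46-style
 * serial EEPROMs these boards commonly carry; the exact EEPROM part
 * is an assumption, the bit order is simply what the code implements.
 */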
void
nge_eeprom_putbyte(struct nge_softc *sc, int addr)
{
	int	d, i;

	d = addr | NGE_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(NGE_MEAR_EE_DIN);
		} else {
			SIO_CLR(NGE_MEAR_EE_DIN);
		}
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void
nge_eeprom_getword(struct nge_softc *sc, int addr, u_int16_t *dest)
{
	int		i;
	u_int16_t	word = 0;

	/* Force EEPROM to idle state. */
	nge_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	nge_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
			word |= i;
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	nge_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void
nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int		i;
	u_int16_t	word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		nge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
nge_mii_sync(struct nge_softc *sc)
{
	int	i;

	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(NGE_MEAR_MII_CLK);
		DELAY(1);
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
void
nge_mii_send(struct nge_softc *sc, u_int32_t bits, int cnt)
{
	int	i;

	SIO_CLR(NGE_MEAR_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(NGE_MEAR_MII_DATA);
		} else {
			SIO_CLR(NGE_MEAR_MII_DATA);
		}
		DELAY(1);
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
		SIO_SET(NGE_MEAR_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
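/*
 * The frames bit-banged below follow the standard IEEE 802.3
 * clause 22 management format: a 2-bit start delimiter, 2-bit
 * opcode, 5-bit PHY address, 5-bit register address, a 2-bit
 * turnaround slot and 16 data bits, most significant bit first.
 */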
int
nge_mii_readreg(struct nge_softc *sc, struct nge_mii_frame *frame)
{
	int	i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, NGE_MEAR, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(NGE_MEAR_MII_DIR);
	/* Check for ack */
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(NGE_MEAR_MII_CLK);
			DELAY(1);
			SIO_SET(NGE_MEAR_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(NGE_MEAR_MII_CLK);
		DELAY(1);
	}

fail:
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
nge_mii_writereg(struct nge_softc *sc, struct nge_mii_frame *frame)
{
	int	s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_WRITEOP;
	frame->mii_turnaround = NGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);
	nge_mii_send(sc, frame->mii_turnaround, 2);
	nge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(NGE_MEAR_MII_DIR);

	splx(s);

	return(0);
}

int
nge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nge_softc	*sc = (struct nge_softc *)dev;
	struct nge_mii_frame	frame;

	DPRINTFN(9, ("%s: nge_miibus_readreg\n", sc->sc_dv.dv_xname));

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	nge_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

void
nge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct nge_softc	*sc = (struct nge_softc *)dev;
	struct nge_mii_frame	frame;

	DPRINTFN(9, ("%s: nge_miibus_writereg\n", sc->sc_dv.dv_xname));

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;
	nge_mii_writereg(sc, &frame);
}

void
nge_miibus_statchg(struct device *dev)
{
	struct nge_softc	*sc = (struct nge_softc *)dev;
	struct mii_data		*mii = &sc->nge_mii;
	u_int32_t		txcfg, rxcfg;

	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

	DPRINTFN(4, ("%s: nge_miibus_statchg txcfg=%#x, rxcfg=%#x\n",
	    sc->sc_dv.dv_xname, txcfg, rxcfg));

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg |= (NGE_RXCFG_RX_FDX);
	} else {
		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg &= ~(NGE_RXCFG_RX_FDX);
	}

	txcfg |= NGE_TXCFG_AUTOPAD;

	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);

	/* If we have a 1000Mbps link, set the mode_1000 bit. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
	else
		NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
}

void
nge_setmulti(struct nge_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		h = 0, i, filtsave;
	int			bit, index;

	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
		return;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 * We also have to tell it that we don't want to use the
	 * hash table for matching unicast addresses.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);

	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
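	/*
	 * For example, a hash value of 0x5a3 selects word 0x5a and
	 * bit 3: the filter pointer is set to NGE_FILTADDR_MCAST_LO +
	 * 0xb4 (0x5a * 2) and bit 3 is set in the data register.
	 */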
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 21) &
		    0x00000FFF;
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
}

void
nge_reset(struct nge_softc *sc)
{
	int	i;

	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);

	for (i = 0; i < NGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
			break;
	}

	if (i == NGE_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
}

/*
 * Probe for a NatSemi chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
nge_probe(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_DP83820)
		return (1);

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
nge_attach(struct device *parent, struct device *self, void *aux)
{
	struct nge_softc	*sc = (struct nge_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	bus_size_t		size;
	bus_dma_segment_t	seg;
	bus_dmamap_t		dmamap;
	int			rseg;
	u_char			eaddr[ETHER_ADDR_LEN];
#ifndef NGE_USEIOSPACE
	pcireg_t		memtype;
#endif
	struct ifnet		*ifp;
	caddr_t			kva;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("%s: map control/status regs\n", sc->sc_dv.dv_xname));

#ifdef NGE_USEIOSPACE
	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
	if (pci_mapreg_map(pa, NGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->nge_btag, &sc->nge_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
	memtype = pci_mapreg_type(pc, pa->pa_tag, NGE_PCI_LOMEM);
	if (pci_mapreg_map(pa, NGE_PCI_LOMEM, memtype, 0, &sc->nge_btag,
	    &sc->nge_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Disable all interrupts */
	CSR_WRITE_4(sc, NGE_IER, 0);

	DPRINTFN(5, ("%s: pci_intr_map\n", sc->sc_dv.dv_xname));
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	DPRINTFN(5, ("%s: pci_intr_string\n", sc->sc_dv.dv_xname));
	intrstr = pci_intr_string(pc, ih);
	DPRINTFN(5, ("%s: pci_intr_establish\n", sc->sc_dv.dv_xname));
	sc->nge_intrhand = pci_intr_establish(pc, ih, IPL_NET, nge_intr, sc,
	    sc->sc_dv.dv_xname);
	if (sc->nge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	/* Reset the adapter. */
	DPRINTFN(5, ("%s: nge_reset\n", sc->sc_dv.dv_xname));
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
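	/*
	 * The station address occupies three consecutive EEPROM words,
	 * stored in reverse word order, which is why the words are
	 * read into eaddr[4], eaddr[2] and eaddr[0] respectively.
	 */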
	DPRINTFN(5, ("%s: nge_read_eeprom\n", sc->sc_dv.dv_xname));
	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);

	/*
	 * A NatSemi chip was detected. Inform the world.
	 */
	printf(", address %s\n", ether_sprintf(eaddr));

	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	sc->sc_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("%s: bus_dmamem_alloc\n", sc->sc_dv.dv_xname));
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct nge_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		goto fail_2;
	}
	DPRINTFN(5, ("%s: bus_dmamem_map\n", sc->sc_dv.dv_xname));
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct nge_list_data), &kva, BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%zd bytes)\n",
		    sc->sc_dv.dv_xname, sizeof(struct nge_list_data));
		goto fail_3;
	}
	DPRINTFN(5, ("%s: bus_dmamap_create\n", sc->sc_dv.dv_xname));
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct nge_list_data), 1,
	    sizeof(struct nge_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		goto fail_4;
	}
	DPRINTFN(5, ("%s: bus_dmamap_load\n", sc->sc_dv.dv_xname));
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
	    sizeof(struct nge_list_data), NULL, BUS_DMA_NOWAIT)) {
		goto fail_5;
	}

	DPRINTFN(5, ("%s: bzero\n", sc->sc_dv.dv_xname));
	sc->nge_ldata = (struct nge_list_data *)kva;

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nge_ioctl;
	ifp->if_start = nge_start;
	ifp->if_watchdog = nge_watchdog;
	ifp->if_hardmtu = NGE_JUMBO_MTU;
	ifq_init_maxlen(&ifp->if_snd, NGE_TX_LIST_CNT - 1);
	DPRINTFN(5, ("%s: bcopy\n", sc->sc_dv.dv_xname));
	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("%s: mii setup\n", sc->sc_dv.dv_xname));
	if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
		DPRINTFN(5, ("%s: TBI mode\n", sc->sc_dv.dv_xname));
		sc->nge_tbi = 1;

		ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_tbi_upd,
		    nge_ifmedia_tbi_sts);

		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

		ifmedia_set(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO);

		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP4_OUT
		    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
		    | NGE_GPIO_GP3_OUTENB | NGE_GPIO_GP4_OUTENB
		    | NGE_GPIO_GP5_OUTENB);

		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
	} else {
		sc->nge_mii.mii_ifp = ifp;
		sc->nge_mii.mii_readreg = nge_miibus_readreg;
		sc->nge_mii.mii_writereg = nge_miibus_writereg;
		sc->nge_mii.mii_statchg = nge_miibus_statchg;

		ifmedia_init(&sc->nge_mii.mii_media, 0, nge_ifmedia_mii_upd,
		    nge_ifmedia_mii_sts);
		mii_attach(&sc->sc_dv, &sc->nge_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->nge_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
			ifmedia_add(&sc->nge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->nge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->nge_mii.mii_media,
			    IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("%s: if_attach\n", sc->sc_dv.dv_xname));
	if_attach(ifp);
	DPRINTFN(5, ("%s: ether_ifattach\n", sc->sc_dv.dv_xname));
	ether_ifattach(ifp);
	DPRINTFN(5, ("%s: timeout_set\n", sc->sc_dv.dv_xname));
	timeout_set(&sc->nge_timeout, nge_tick, sc);
	timeout_add_sec(&sc->nge_timeout, 1);
	return;

fail_5:
	bus_dmamap_destroy(sc->sc_dmatag, dmamap);

fail_4:
	bus_dmamem_unmap(sc->sc_dmatag, kva,
	    sizeof(struct nge_list_data));

fail_3:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->nge_intrhand);

fail_1:
	bus_space_unmap(sc->nge_btag, sc->nge_bhandle, size);
}

/*
 * Initialize the transmit descriptors.
 */
int
nge_list_tx_init(struct nge_softc *sc)
{
	struct nge_list_data	*ld;
	struct nge_ring_data	*cd;
	int			i;

	cd = &sc->nge_cdata;
	ld = sc->nge_ldata;

	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
		if (i == (NGE_TX_LIST_CNT - 1)) {
			ld->nge_tx_list[i].nge_nextdesc =
			    &ld->nge_tx_list[0];
			ld->nge_tx_list[i].nge_next =
			    VTOPHYS(&ld->nge_tx_list[0]);
		} else {
			ld->nge_tx_list[i].nge_nextdesc =
			    &ld->nge_tx_list[i + 1];
			ld->nge_tx_list[i].nge_next =
			    VTOPHYS(&ld->nge_tx_list[i + 1]);
		}
		ld->nge_tx_list[i].nge_mbuf = NULL;
		ld->nge_tx_list[i].nge_ptr = 0;
		ld->nge_tx_list[i].nge_ctl = 0;
	}

	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;

	return(0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
nge_list_rx_init(struct nge_softc *sc)
{
	struct nge_list_data	*ld;
	struct nge_ring_data	*cd;
	int			i;

	ld = sc->nge_ldata;
	cd = &sc->nge_cdata;

	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (NGE_RX_LIST_CNT - 1)) {
			ld->nge_rx_list[i].nge_nextdesc =
			    &ld->nge_rx_list[0];
			ld->nge_rx_list[i].nge_next =
			    VTOPHYS(&ld->nge_rx_list[0]);
		} else {
			ld->nge_rx_list[i].nge_nextdesc =
			    &ld->nge_rx_list[i + 1];
			ld->nge_rx_list[i].nge_next =
			    VTOPHYS(&ld->nge_rx_list[i + 1]);
		}
	}

	cd->nge_rx_prod = 0;

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
int
nge_newbuf(struct nge_softc *sc, struct nge_desc *c, struct mbuf *m)
{
	struct mbuf	*m_new = NULL;

	if (m == NULL) {
		m_new = MCLGETL(NULL, M_DONTWAIT, NGE_MCLBYTES);
		if (m_new == NULL)
			return (ENOBUFS);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES;
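	/*
	 * Trimming by sizeof(u_int64_t) keeps the buffer on a 64-bit
	 * boundary (assuming the cluster itself starts on one), which
	 * satisfies the RX alignment constraint described at the top
	 * of this file.
	 */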
	m_adj(m_new, sizeof(u_int64_t));

	c->nge_mbuf = m_new;
	c->nge_ptr = VTOPHYS(mtod(m_new, caddr_t));
	DPRINTFN(7, ("%s: c->nge_ptr=%#x\n", sc->sc_dv.dv_xname,
	    c->nge_ptr));
	c->nge_ctl = m_new->m_len;
	c->nge_extsts = 0;

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
nge_rxeof(struct nge_softc *sc)
{
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct nge_desc		*cur_rx;
	int			i, total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;
	i = sc->nge_cdata.nge_rx_prod;

	while (NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
		struct mbuf *m0 = NULL;
		u_int32_t extsts;

		cur_rx = &sc->nge_ldata->nge_rx_list[i];
		rxstat = cur_rx->nge_rxstat;
		extsts = cur_rx->nge_extsts;
		m = cur_rx->nge_mbuf;
		cur_rx->nge_mbuf = NULL;
		total_len = NGE_RXBYTES(cur_rx);
		NGE_INC(i, NGE_RX_LIST_CNT);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
#if NVLAN > 0
			if ((rxstat & NGE_RXSTAT_RUNT) &&
			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN -
			    ETHER_VLAN_ENCAP_LEN)) {
				/*
				 * Workaround a hardware bug. Accept runt
				 * frames if their length is larger than or
				 * equal to 56 (ETHER_MIN_LEN - ETHER_CRC_LEN
				 * - ETHER_VLAN_ENCAP_LEN = 64 - 4 - 4).
				 */
			} else {
#endif
				ifp->if_ierrors++;
				nge_newbuf(sc, cur_rx, m);
				continue;
#if NVLAN > 0
			}
#endif
		}

		/*
		 * Ok. NatSemi really screwed up here. This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers. RX buffers must be 64-bit aligned.
		 */
#ifndef __STRICT_ALIGNMENT
		/*
		 * By popular demand, ignore the alignment problems
		 * on the Intel x86 platform. The performance hit
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames. We still
		 * need to fix up the alignment everywhere else though.
		 */
		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
#endif
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
			nge_newbuf(sc, cur_rx, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
#ifndef __STRICT_ALIGNMENT
		} else {
			m->m_pkthdr.len = m->m_len = total_len;
		}
#endif

#if NVLAN > 0
		if (extsts & NGE_RXEXTSTS_VLANPKT) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & NGE_RXEXTSTS_VTCI);
			m->m_flags |= M_VLANTAG;
		}
#endif

		/* Do IP checksum checking. */
		if (extsts & NGE_RXEXTSTS_IPPKT) {
			if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if ((extsts & NGE_RXEXTSTS_TCPPKT) &&
			    (!(extsts & NGE_RXEXTSTS_TCPCSUMERR)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
			else if ((extsts & NGE_RXEXTSTS_UDPPKT) &&
			    (!(extsts & NGE_RXEXTSTS_UDPCSUMERR)))
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
		}

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	sc->nge_cdata.nge_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
void
nge_txeof(struct nge_softc *sc)
{
	struct nge_desc	*cur_tx;
	struct ifnet	*ifp;
	u_int32_t	idx;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->nge_cdata.nge_tx_cons;
	while (idx != sc->nge_cdata.nge_tx_prod) {
		cur_tx = &sc->nge_ldata->nge_tx_list[idx];

		if (NGE_OWNDESC(cur_tx))
			break;

		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
			sc->nge_cdata.nge_tx_cnt--;
			NGE_INC(idx, NGE_TX_LIST_CNT);
			continue;
		}

		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=
		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;

		if (cur_tx->nge_mbuf != NULL) {
			m_freem(cur_tx->nge_mbuf);
			cur_tx->nge_mbuf = NULL;
			ifq_clr_oactive(&ifp->if_snd);
		}

		sc->nge_cdata.nge_tx_cnt--;
		NGE_INC(idx, NGE_TX_LIST_CNT);
	}

	sc->nge_cdata.nge_tx_cons = idx;

	if (idx == sc->nge_cdata.nge_tx_prod)
		ifp->if_timer = 0;
}

void
nge_tick(void *xsc)
{
	struct nge_softc	*sc = xsc;
	struct mii_data		*mii = &sc->nge_mii;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;

	s = splnet();

	DPRINTFN(10, ("%s: nge_tick: link=%d\n", sc->sc_dv.dv_xname,
	    sc->nge_link));

	timeout_add_sec(&sc->nge_timeout, 1);
	if (sc->nge_link) {
		splx(s);
		return;
	}

	if (sc->nge_tbi) {
		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
		    == IFM_AUTO) {
			u_int32_t bmsr, anlpar, txcfg, rxcfg;

			bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
			DPRINTFN(2, ("%s: nge_tick: bmsr=%#x\n",
			    sc->sc_dv.dv_xname, bmsr));

			if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
				CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);

				splx(s);
				return;
			}

			anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
			txcfg = CSR_READ_4(sc, NGE_TX_CFG);
			rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

			DPRINTFN(2, ("%s: nge_tick: anlpar=%#x, txcfg=%#x, "
			    "rxcfg=%#x\n", sc->sc_dv.dv_xname, anlpar,
			    txcfg, rxcfg));

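			/*
			 * If the link partner advertises nothing at
			 * all (anlpar == 0), err on the side of full
			 * duplex rather than falling back to half.
			 */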
			if (anlpar == 0 || anlpar & NGE_TBIANAR_FDX) {
				txcfg |= (NGE_TXCFG_IGN_HBEAT|
				    NGE_TXCFG_IGN_CARR);
				rxcfg |= NGE_RXCFG_RX_FDX;
			} else {
				txcfg &= ~(NGE_TXCFG_IGN_HBEAT|
				    NGE_TXCFG_IGN_CARR);
				rxcfg &= ~(NGE_RXCFG_RX_FDX);
			}
			txcfg |= NGE_TXCFG_AUTOPAD;
			CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
			CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
		}

		DPRINTF(("%s: gigabit link up\n", sc->sc_dv.dv_xname));
		sc->nge_link++;
		if (!ifq_empty(&ifp->if_snd))
			nge_start(ifp);
	} else {
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->nge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
				DPRINTF(("%s: gigabit link up\n",
				    sc->sc_dv.dv_xname));
			if (!ifq_empty(&ifp->if_snd))
				nge_start(ifp);
		}
	}

	splx(s);
}

int
nge_intr(void *arg)
{
	struct nge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		nge_stop(sc);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 0);

	/* Data LED on for TBI mode */
	if (sc->nge_tbi)
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP3_OUT);

	for (;;) {
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if ((status & NGE_INTRS) == 0)
			break;

		claimed = 1;

		if ((status & NGE_ISR_TX_DESC_OK) ||
		    (status & NGE_ISR_TX_ERR) ||
		    (status & NGE_ISR_TX_OK) ||
		    (status & NGE_ISR_TX_IDLE))
			nge_txeof(sc);

		if ((status & NGE_ISR_RX_DESC_OK) ||
		    (status & NGE_ISR_RX_ERR) ||
		    (status & NGE_ISR_RX_OFLOW) ||
		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
		    (status & NGE_ISR_RX_IDLE) ||
		    (status & NGE_ISR_RX_OK))
			nge_rxeof(sc);

		if ((status & NGE_ISR_RX_IDLE))
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		if (status & NGE_ISR_SYSERR) {
			nge_reset(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			nge_init(sc);
		}

#if 0
		/*
		 * XXX: nge_tick() is not ready to be called this way
		 * it screws up the aneg timeout because mii_tick() is
		 * only to be called once per second.
		 */
		if (status & NGE_IMR_PHY_INTR) {
			sc->nge_link = 0;
			nge_tick(sc);
		}
#endif
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	if (!ifq_empty(&ifp->if_snd))
		nge_start(ifp);

	/* Data LED off for TBI mode */
	if (sc->nge_tbi)
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    & ~NGE_GPIO_GP3_OUT);

	return claimed;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
nge_encap(struct nge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct nge_desc	*f = NULL;
	struct mbuf	*m;
	int		frag, cur, cnt = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur = frag = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if ((NGE_TX_LIST_CNT -
			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc->nge_ldata->nge_tx_list[frag];
			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
			f->nge_ptr = VTOPHYS(mtod(m, vaddr_t));
			DPRINTFN(7, ("%s: f->nge_ptr=%#x\n",
			    sc->sc_dv.dv_xname, f->nge_ptr));
			if (cnt != 0)
				f->nge_ctl |= NGE_CMDSTS_OWN;
			cur = frag;
			NGE_INC(frag, NGE_TX_LIST_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

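	/*
	 * Note the ownership handoff order: OWN was set on every
	 * descriptor in the chain except the first while the chain was
	 * being built.  The first descriptor's OWN bit is flipped only
	 * below, once the whole chain is complete, so the chip never
	 * sees a partially constructed chain.
	 */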
	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;

#if NVLAN > 0
	if (m_head->m_flags & M_VLANTAG) {
		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
		    (NGE_TXEXTSTS_VLANPKT|htons(m_head->m_pkthdr.ether_vtag));
	}
#endif

	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
	sc->nge_cdata.nge_tx_cnt += cnt;
	*txidx = frag;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
nge_start(struct ifnet *ifp)
{
	struct nge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;
	int			pkts = 0;

	sc = ifp->if_softc;

	if (!sc->nge_link)
		return;

	idx = sc->nge_cdata.nge_tx_prod;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	while (sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (nge_encap(sc, m_head, &idx)) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	sc->nge_cdata.nge_tx_prod = idx;
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nge_init(void *xsc)
{
	struct nge_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;
	u_int32_t		txcfg, rxcfg;
	uint64_t		media;
	int			s;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	nge_stop(sc);

	mii = sc->nge_tbi ? NULL : &sc->nge_mii;

	/* Set MAC address */
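	/*
	 * The perfect-match filter is loaded 16 bits at a time: a write
	 * to NGE_RXFILT_CTL selects one of the PAR0..PAR2 slots, then
	 * the corresponding word of the station address goes out
	 * through NGE_RXFILT_DATA.
	 */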
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Init circular RX list. */
	if (nge_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dv.dv_xname);
		nge_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	nge_list_tx_init(sc);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
	else
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	else
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);

	/*
	 * Load the multicast filter.
	 */
	nge_setmulti(sc);

	/* Turn the receive filter on */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
	    VTOPHYS(&sc->nge_ldata->nge_rx_list[0]));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
	    VTOPHYS(&sc->nge_ldata->nge_tx_list[0]));

	/* Set RX configuration */
	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);

	/*
	 * Enable hardware checksum validation for all IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);

	/*
	 * If VLAN support is enabled, tell the chip to detect
	 * and strip VLAN tag info from received frames. The tag
	 * will be provided in the extsts field in the RX descriptors.
	 */
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
		    NGE_VIPRXCTL_TAG_DETECT_ENB | NGE_VIPRXCTL_TAG_STRIP_ENB);

	/* Set TX configuration */
	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);

	/*
	 * If VLAN support is enabled, tell the chip to insert
	 * VLAN tags on a per-packet basis as dictated by the
	 * code in the frame encapsulation routine.
	 */
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);

	/* Set full/half duplex mode. */
	if (sc->nge_tbi)
		media = sc->nge_ifmedia.ifm_cur->ifm_media;
	else
		media = mii->mii_media_active;

	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

	DPRINTFN(4, ("%s: nge_init txcfg=%#x, rxcfg=%#x\n",
	    sc->sc_dv.dv_xname, txcfg, rxcfg));

	if ((media & IFM_GMASK) == IFM_FDX) {
		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg |= (NGE_RXCFG_RX_FDX);
	} else {
		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg &= ~(NGE_RXCFG_RX_FDX);
	}

	txcfg |= NGE_TXCFG_AUTOPAD;

	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);

	nge_tick(sc);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes and enable return
	 * of extended status information in the DMA descriptors,
	 * required for checksum offloading.
	 */
	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|NGE_CFG_PHYINTR_LNK|
	    NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);

	DPRINTFN(1, ("%s: nge_init: config=%#x\n", sc->sc_dv.dv_xname,
	    CSR_READ_4(sc, NGE_CFG)));

	/*
	 * Configure interrupt holdoff (moderation). We can
	 * have the chip delay interrupt delivery for a certain
	 * period. Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
	 */
	CSR_WRITE_4(sc, NGE_IHR, 0x01);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
	CSR_WRITE_4(sc, NGE_IER, 1);

	/* Enable receiver and transmitter. */
	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

	if (sc->nge_tbi)
		nge_ifmedia_tbi_upd(ifp);
	else
		nge_ifmedia_mii_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);
}

/*
 * Set mii media options.
 */
int
nge_ifmedia_mii_upd(struct ifnet *ifp)
{
	struct nge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->nge_mii;

	DPRINTFN(2, ("%s: nge_ifmedia_mii_upd\n", sc->sc_dv.dv_xname));

	sc->nge_link = 0;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current mii media status.
 */
void
nge_ifmedia_mii_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->nge_mii;

	DPRINTFN(2, ("%s: nge_ifmedia_mii_sts\n", sc->sc_dv.dv_xname));

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Set tbi media options.
 */
int
nge_ifmedia_tbi_upd(struct ifnet *ifp)
{
	struct nge_softc	*sc = ifp->if_softc;

	DPRINTFN(2, ("%s: nge_ifmedia_tbi_upd\n", sc->sc_dv.dv_xname));

	sc->nge_link = 0;

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
	    == IFM_AUTO) {
		u_int32_t anar, bmcr;

		anar = CSR_READ_4(sc, NGE_TBI_ANAR);
		anar |= (NGE_TBIANAR_HDX | NGE_TBIANAR_FDX);
		CSR_WRITE_4(sc, NGE_TBI_ANAR, anar);

		bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
		bmcr |= (NGE_TBIBMCR_ENABLE_ANEG|NGE_TBIBMCR_RESTART_ANEG);
		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);

		bmcr &= ~(NGE_TBIBMCR_RESTART_ANEG);
		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
	} else {
		u_int32_t txcfg, rxcfg;

		txcfg = CSR_READ_4(sc, NGE_TX_CFG);
		rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
		    == IFM_FDX) {
			txcfg |= NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR;
			rxcfg |= NGE_RXCFG_RX_FDX;
		} else {
			txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
			rxcfg &= ~(NGE_RXCFG_RX_FDX);
		}

		txcfg |= NGE_TXCFG_AUTOPAD;
		CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
		CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
	}

	NGE_CLRBIT(sc, NGE_GPIO, NGE_GPIO_GP3_OUT);

	return(0);
}

/*
 * Report current tbi media status.
 */
void
nge_ifmedia_tbi_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nge_softc	*sc = ifp->if_softc;
	u_int32_t		bmcr;

	bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
		u_int32_t bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmsr=%#x, bmcr=%#x\n",
		    sc->sc_dv.dv_xname, bmsr, bmcr));

		if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
			ifmr->ifm_active = IFM_ETHER|IFM_NONE;
			ifmr->ifm_status = IFM_AVALID;
			return;
		}
	} else {
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmcr=%#x\n",
		    sc->sc_dv.dv_xname, bmcr));
	}

	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_1000_SX;

	if (bmcr & NGE_TBIBMCR_LOOPBACK)
		ifmr->ifm_active |= IFM_LOOP;

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
		u_int32_t anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts anlpar=%#x\n",
		    sc->sc_dv.dv_xname, anlpar));

		ifmr->ifm_active |= IFM_AUTO;
		if (anlpar & NGE_TBIANLPAR_FDX) {
			ifmr->ifm_active |= IFM_FDX;
		} else if (anlpar & NGE_TBIANLPAR_HDX) {
			ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active |= IFM_FDX;
	} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

int
nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nge_init(sc);
		break;

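	/*
	 * If the interface is up and running and only the PROMISC flag
	 * changed, adjust the receive filter in place rather than doing
	 * a full stop and reinit of the interface.
	 */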
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->nge_if_flags & IFF_PROMISC)) {
				NGE_SETBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS|
				    NGE_RXFILTCTL_ALLMULTI);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->nge_if_flags & IFF_PROMISC) {
				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS);
				if (!(ifp->if_flags & IFF_ALLMULTI))
					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
					    NGE_RXFILTCTL_ALLMULTI);
			} else {
				ifp->if_flags &= ~IFF_RUNNING;
				nge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nge_stop(sc);
		}
		sc->nge_if_flags = ifp->if_flags;
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->nge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
			    command);
		} else {
			mii = &sc->nge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

void
nge_watchdog(struct ifnet *ifp)
{
	struct nge_softc	*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);

	nge_stop(sc);
	nge_reset(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	nge_init(sc);

	if (!ifq_empty(&ifp->if_snd))
		nge_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
nge_stop(struct nge_softc *sc)
{
	int			i;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;
	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = &sc->nge_mii;
	}

	timeout_del(&sc->nge_timeout);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	CSR_WRITE_4(sc, NGE_IER, 0);
	CSR_WRITE_4(sc, NGE_IMR, 0);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);

	if (!sc->nge_tbi)
		mii_down(mii);

	sc->nge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
		}
	}
	bzero(&sc->nge_ldata->nge_rx_list,
	    sizeof(sc->nge_ldata->nge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
		}
	}

	bzero(&sc->nge_ldata->nge_tx_list,
	    sizeof(sc->nge_ldata->nge_tx_list));
}

const struct cfattach nge_ca = {
	sizeof(struct nge_softc), nge_probe, nge_attach
};

struct cfdriver nge_cd = {
	NULL, "nge", DV_IFNET
};