1 /* $OpenBSD: fxp.c,v 1.100 2009/10/15 17:54:54 deraadt Exp $ */ 2 /* $NetBSD: if_fxp.c,v 1.2 1997/06/05 02:01:55 thorpej Exp $ */ 3 4 /* 5 * Copyright (c) 1995, David Greenman 6 * All rights reserved. 7 * 8 * Modifications to support NetBSD: 9 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice unmodified, this list of conditions, and the following 16 * disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 * Id: if_fxp.c,v 1.55 1998/08/04 08:53:12 dg Exp
 */

/*
 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <netinet/if_ether.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/fxpreg.h>
#include <dev/ic/fxpvar.h>

/*
 * NOTE! On the Alpha, we have an alignment constraint. The
 * card DMAs the packet immediately following the RFA. However,
 * the first thing in the packet is a 14-byte Ethernet header.
 * This means that the packet is misaligned. To compensate,
 * we actually offset the RFA 2 bytes into the cluster. This
 * aligns the packet after the Ethernet header at a 32-bit
 * boundary. HOWEVER! This means that the RFA is misaligned!
 *
 * The sizeof(bus_dmamap_t *) term reserves room for the rx dmamap
 * pointer that this driver stashes at the front of each receive
 * cluster (see the uses of m->m_ext.ext_buf elsewhere in this file).
 */
#define RFA_ALIGNMENT_FUDGE	(2 + sizeof(bus_dmamap_t *))

/*
 * Inline function to copy a 16-bit aligned 32-bit quantity.
 * Used because the RFA fudge above leaves 32-bit fields on
 * 16-bit boundaries; copying halfword-at-a-time avoids a
 * misaligned 32-bit access.
 */
static __inline void fxp_lwcopy(volatile u_int32_t *,
    volatile u_int32_t *);

static __inline void
fxp_lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst)
{
	volatile u_int16_t *a = (u_int16_t *)src;
	volatile u_int16_t *b = (u_int16_t *)dst;

	b[0] = a[0];
	b[1] = a[1];
}

/*
 * Template for default configuration parameters.
 * See struct fxp_cb_config for the bit definitions.
 * Note, cb_command is filled in later.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command */
	0xff, 0xff, 0xff, 0xff,	/* link_addr */
	0x16,	/*  0 Byte count. */
	0x08,	/*  1 Fifo limit */
	0x00,	/*  2 Adaptive ifs */
	0x00,	/*  3 ctrl0 */
	0x00,	/*  4 rx_dma_bytecount */
	0x80,	/*  5 tx_dma_bytecount */
	0xb2,	/*  6 ctrl 1*/
	0x03,	/*  7 ctrl 2*/
	0x01,	/*  8 mediatype */
	0x00,	/*  9 void2 */
	0x26,	/* 10 ctrl3 */
	0x00,	/* 11 linear priority */
	0x60,	/* 12 interfrm_spacing */
	0x00,	/* 13 void31 */
	0xf2,	/* 14 void32 */
	0x48,	/* 15 promiscuous */
	0x00,	/* 16 void41 */
	0x40,	/* 17 void42 */
	0xf3,	/* 18 stripping */
	0x00,	/* 19 fdx_pin */
	0x3f,	/* 20 multi_ia */
	0x05	/* 21 mc_all */
};

/* Forward declarations for this file's internal functions. */
void fxp_eeprom_shiftin(struct fxp_softc *, int, int);
void fxp_eeprom_putword(struct fxp_softc *, int, u_int16_t);
void fxp_write_eeprom(struct fxp_softc *, u_short *, int, int);
int fxp_mediachange(struct ifnet *);
void fxp_mediastatus(struct ifnet *, struct ifmediareq *);
void fxp_scb_wait(struct fxp_softc *);
void fxp_start(struct ifnet *);
int fxp_ioctl(struct ifnet *, u_long, caddr_t);
void fxp_init(void *);
void fxp_load_ucode(struct fxp_softc *);
void fxp_stop(struct fxp_softc *, int, int);
void fxp_watchdog(struct ifnet *);
int fxp_add_rfabuf(struct fxp_softc *, struct mbuf *);
int fxp_mdi_read(struct device *, int, int);
void fxp_mdi_write(struct device *, int, int, int);
void fxp_autosize_eeprom(struct fxp_softc *);
void fxp_statchg(struct device *);
void fxp_read_eeprom(struct fxp_softc *, u_int16_t *,
    int, int);
void fxp_stats_update(void *);
void fxp_mc_setup(struct fxp_softc *, int);
void fxp_scb_cmd(struct fxp_softc *, u_int16_t);

/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 *
 * NOTE(review): this is a file-scope global shared by all fxp
 * instances, so an underrun on one card raises the threshold for
 * every card in the system.
 */
static int tx_threshold = 64;

/*
 * Interrupts coalescing code params
 */
int fxp_int_delay = FXP_INT_DELAY;
int fxp_bundle_max = FXP_BUNDLE_MAX;
int fxp_min_size_mask = FXP_MIN_SIZE_MASK;

/*
 * TxCB list index mask. This is used to do list wrap-around.
 * (Assumes FXP_NTXCB is a power of two.)
 */
#define FXP_TXCB_MASK	(FXP_NTXCB - 1)

/*
 * Maximum number of seconds that the receiver can be idle before we
 * assume it's dead and attempt to reset it by reprogramming the
 * multicast filter. This is part of a work-around for a bug in the
 * NIC. See fxp_stats_update().
 */
#define FXP_MAX_RX_IDLE	15

/*
 * Wait for the previous command to be accepted (but not necessarily
 * completed).  Spins on the low byte of the SCB command register
 * (which the chip clears on acceptance) for at most FXP_CMD_TMO
 * iterations of 2us each; logs a warning on timeout rather than
 * failing, since callers have no recovery path.
 */
void
fxp_scb_wait(struct fxp_softc *sc)
{
	int i = FXP_CMD_TMO;

	while ((CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff) && --i)
		DELAY(2);
	if (i == 0)
		printf("%s: warning: SCB timed out\n", sc->sc_dev.dv_xname);
}

/*
 * Clock the low `length' bits of `data' into the serial EEPROM,
 * most-significant bit first.  Chip select (EECS) is assumed to be
 * asserted by the caller; each bit is presented on EEDI and latched
 * by pulsing the EESK clock line.
 */
void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
{
	u_int16_t reg;
	int x;

	/*
	 * Shift in data.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
}

/*
 * Write one 16-bit word to the serial EEPROM at word address
 * `offset'.  Performs the full EWEN (erase/write enable) ...
 * write ... busy-wait ... EWDS (erase/write disable) sequence.
 * The busy-wait polls EEDO for up to ~50ms; a timeout falls
 * through silently.
 */
void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}

/*
 * Write `words' consecutive 16-bit words from `data' to the serial
 * EEPROM starting at word address `offset'.
 */
void
fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		fxp_eeprom_putword(sc, offset + i, data[i]);
}

/*************************************************************
 * Operating system-specific autoconfiguration glue
 *************************************************************/

void	fxp_power(int, void *);

struct cfdriver fxp_cd = {
	NULL, "fxp", DV_IFNET
};

/*
 * Power handler routine. Called when the system is transitioning
 * into/out of power save modes. The main purpose of this routine
 * is to shut off receiver DMA so it doesn't clobber kernel memory
 * at the wrong time.
 */
void
fxp_power(int why, void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp;
	int s;

	s = splnet();
	if (why != PWR_RESUME)
		fxp_stop(sc, 0, 0);
	else {
		/* Resuming: re-init only if the interface was up. */
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP)
			fxp_init(sc);
	}
	splx(s);
}

/*************************************************************
 * End of operating system-specific autoconfiguration glue
 *************************************************************/

/*
 * Do generic parts of attach.  Called by the bus front-end after it
 * has mapped the registers and established the interrupt.  Allocates
 * and DMA-maps the shared control block (TxCBs + stats + config),
 * pre-allocates receive buffers, reads PHY info and the MAC address
 * from the EEPROM, probes the MII, and attaches the ifnet.
 * Returns 0 on success or ENOMEM after unwinding partial allocations.
 */
int
fxp_attach(struct fxp_softc *sc, const char *intrstr)
{
	struct ifnet *ifp;
	struct mbuf *m;
	bus_dmamap_t rxmap;
	u_int16_t data;
	u_int8_t enaddr[6];
	int i, err;

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(10);

	/*
	 * Allocate, map and load the one physically-contiguous chunk
	 * holding all TxCBs, the stats buffer and the config blocks.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct fxp_ctrl),
	    PAGE_SIZE, 0, &sc->sc_cb_seg, 1, &sc->sc_cb_nseg, BUS_DMA_NOWAIT))
		goto fail;
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg,
	    sizeof(struct fxp_ctrl), (caddr_t *)&sc->sc_ctrl,
	    BUS_DMA_NOWAIT)) {
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		goto fail;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct fxp_ctrl),
	    1, sizeof(struct fxp_ctrl), 0, BUS_DMA_NOWAIT,
	    &sc->tx_cb_map)) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
		    sizeof(struct fxp_ctrl));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->tx_cb_map, (caddr_t)sc->sc_ctrl,
	    sizeof(struct fxp_ctrl), NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
		    sizeof(struct fxp_ctrl));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		goto fail;
	}

	/* Build the circular list of transmit software descriptors. */
	for (i = 0; i < FXP_NTXCB; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    FXP_NTXSEG, MCLBYTES, 0, 0, &sc->txs[i].tx_map)) != 0) {
			printf("%s: unable to create tx dma map %d, error %d\n",
			    sc->sc_dev.dv_xname, i, err);
			goto fail;
		}
		sc->txs[i].tx_mbuf = NULL;
		sc->txs[i].tx_cb = sc->sc_ctrl->tx_cb + i;
		sc->txs[i].tx_off = offsetof(struct fxp_ctrl, tx_cb[i]);
		sc->txs[i].tx_next = &sc->txs[(i + 1) & FXP_TXCB_MASK];
	}
	bzero(sc->sc_ctrl, sizeof(struct fxp_ctrl));

	/*
	 * Pre-allocate some receive buffers.
	 */
	sc->sc_rxfree = 0;
	for (i = 0; i < FXP_NRFABUFS_MIN; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
			printf("%s: unable to create rx dma map %d, error %d\n",
			    sc->sc_dev.dv_xname, i, err);
			goto fail;
		}
		sc->rx_bufs++;
	}
	for (i = 0; i < FXP_NRFABUFS_MIN; i++)
		if (fxp_add_rfabuf(sc, NULL) != 0)
			goto fail;

	/*
	 * Find out how large of an SEEPROM we have.
	 */
	fxp_autosize_eeprom(sc);

	/*
	 * Get info about the primary PHY
	 */
	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
	sc->phy_primary_addr = data & 0xff;
	sc->phy_primary_device = (data >> 8) & 0x3f;
	sc->phy_10Mbps_only = data >> 15;

	/*
	 * Only 82558 and newer cards can do this.
	 * (Interrupt-coalescing parameters for the microcode.)
	 */
	if (sc->sc_revision >= FXP_REV_82558_A4) {
		sc->sc_int_delay = fxp_int_delay;
		sc->sc_bundle_max = fxp_bundle_max;
		sc->sc_min_size_mask = fxp_min_size_mask;
	}
	/*
	 * Read MAC address.
	 */
	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);

	ifp = &sc->sc_arpcom.ac_if;
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * If requested by the front-end, clear the dynamic standby
	 * bit in the EEPROM and fix up the checksum word so the
	 * contents remain valid (checksum of all words == 0xBABA).
	 */
	if (sc->sc_flags & FXPF_DISABLE_STANDBY) {
		fxp_read_eeprom(sc, &data, 10, 1);
		if (data & 0x02) {			/* STB enable */
			u_int16_t cksum;

			printf("%s: Disabling dynamic standby mode in EEPROM",
			    sc->sc_dev.dv_xname);
			data &= ~0x02;
			fxp_write_eeprom(sc, &data, 10, 1);
			printf(", New ID 0x%x", data);
			cksum = 0;
			for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
				fxp_read_eeprom(sc, &data, i, 1);
				cksum += data;
			}
			i = (1 << sc->eeprom_size) - 1;
			cksum = 0xBABA - cksum;
			fxp_read_eeprom(sc, &data, i, 1);
			fxp_write_eeprom(sc, &cksum, i, 1);
			printf(", cksum @ 0x%x: 0x%x -> 0x%x\n",
			    i, data, cksum);
		}
	}

	/* Receiver lock-up workaround detection. */
	fxp_read_eeprom(sc, &data, 3, 1);
	if ((data & 0x03) != 0x03)
		sc->sc_flags |= FXPF_RECV_WORKAROUND;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = fxp_mdi_read;
	sc->sc_mii.mii_writereg = fxp_mdi_write;
	sc->sc_mii.mii_statchg = fxp_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mediachange,
	    fxp_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	/* If no phy found, just use auto mode */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		printf("%s: no phy found, using manual mode\n",
		    sc->sc_dev.dv_xname);
	}

	if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0))
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	else if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0))
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/*
	 * Add power hook, so that DMA is disabled prior to reboot. Not
	 * doing so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	sc->sc_powerhook = powerhook_establish(fxp_power, sc);

	/*
	 * Initialize timeout for statistics update.
	 */
	timeout_set(&sc->stats_update_to, fxp_stats_update, sc);

	return (0);

fail:
	printf("%s: Failed to malloc memory\n", sc->sc_dev.dv_xname);
	if (sc->tx_cb_map != NULL) {
		bus_dmamap_unload(sc->sc_dmat, sc->tx_cb_map);
		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
		/*
		 * NOTE(review): the unmap size here differs from the
		 * sizeof(struct fxp_ctrl) used at bus_dmamem_map() time
		 * above — confirm the two sizes agree (looks like a
		 * historical inconsistency).
		 */
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
		    sizeof(struct fxp_cb_tx) * FXP_NTXCB);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
	}
	/* Release any receive buffers that were already set up. */
	m = sc->rfa_headm;
	while (m != NULL) {
		rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
		bus_dmamap_unload(sc->sc_dmat, rxmap);
		FXP_RXMAP_PUT(sc, rxmap);
		m = m_free(m);
	}
	return (ENOMEM);
}

/*
 * From NetBSD:
 *
 * Figure out EEPROM size.
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero, by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-state,
 * before this, which should translate to a logical one.
 *
 * Other ways to do this would be to try to read a register with known
 * contents with a varying number of address bits, but no such
 * register seem to be available. The high bits of register 10 are 01
 * on the 558 and 559, but apparently not on the 557.
 *
 * The Linux driver computes a checksum on the EEPROM data, but the
 * value of this checksum is not very well documented.
 */
void
fxp_autosize_eeprom(struct fxp_softc *sc)
{
	u_int16_t reg;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	for (x = 3; x > 0; x--) {
		if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		} else {
			reg = FXP_EEPROM_EECS;
		}
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    reg | FXP_EEPROM_EESK);
		DELAY(4);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(4);
	}
	/*
	 * Shift in address.
	 * Wait for the dummy zero following a correct address shift.
	 * (Zero address bits are clocked in; the number of clocks needed
	 * before EEDO drops is the device's address width, i.e. its size
	 * in words is 1 << x.)
	 */
	for (x = 1; x <= 8; x++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    FXP_EEPROM_EECS | FXP_EEPROM_EESK);
		DELAY(4);
		if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
		    FXP_EEPROM_EEDO) == 0)
			break;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		DELAY(4);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(4);
	/* Number of address bits == log2 of EEPROM size in words. */
	sc->eeprom_size = x;
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset,
    int words)
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		/*
		 * Shift in address (width discovered by
		 * fxp_autosize_eeprom()), msb first.
		 */
		for (x = sc->eeprom_size; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data.
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		data[i] = letoh16(data[i]);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(4);
	}
}

/*
 * Start packet transmission on the interface.
 */
void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_txsw *txs = sc->sc_cbt_prod;
	struct fxp_cb_tx *txc;
	struct mbuf *m0, *m = NULL;
	int cnt = sc->sc_cbt_cnt, seg;

	/* Nothing to do unless running and not already saturated. */
	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	while (1) {
		/*
		 * Leave two TxCBs free: one for the trailing NOP/suspend
		 * descriptor queued below, one so producer never catches
		 * consumer.
		 */
		if (cnt >= (FXP_NTXCB - 2)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = txs->tx_next;

		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * If the packet has too many fragments to map directly,
		 * linearize it into a fresh (cluster) mbuf and retry.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
		    m0, BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
			    m, BUS_DMA_NOWAIT) != 0) {
				m_freem(m);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We transmit the linearized copy instead. */
			m_freem(m0);
			m0 = m;
			m = NULL;
		}

		txs->tx_mbuf = m0;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif

		FXP_MBUF_SYNC(sc, txs->tx_map, BUS_DMASYNC_PREWRITE);

		/* Fill in the TxCB: flexible mode, one TBD per segment. */
		txc = txs->tx_cb;
		txc->tbd_number = txs->tx_map->dm_nsegs;
		txc->cb_status = 0;
		txc->cb_command =
		    htole16(FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF);
		txc->tx_threshold = tx_threshold;
		for (seg = 0; seg < txs->tx_map->dm_nsegs; seg++) {
			txc->tbd[seg].tb_addr =
			    htole32(txs->tx_map->dm_segs[seg].ds_addr);
			txc->tbd[seg].tb_size =
			    htole32(txs->tx_map->dm_segs[seg].ds_len);
		}
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		++cnt;
		sc->sc_cbt_prod = txs;
	}

	if (cnt != sc->sc_cbt_cnt) {
		/* We enqueued at least one. */
		ifp->if_timer = 5;

		/*
		 * Append a NOP descriptor with the Suspend bit set,
		 * then clear S|I on the previous trailer so the CU
		 * runs through to the new one, and resume the CU.
		 */
		txs = sc->sc_cbt_prod;
		txs = txs->tx_next;
		sc->sc_cbt_prod = txs;
		txs->tx_cb->cb_command =
		    htole16(FXP_CB_COMMAND_I | FXP_CB_COMMAND_NOP |
		    FXP_CB_COMMAND_S);
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		sc->sc_cbt_prev->tx_cb->cb_command &=
		    htole16(~(FXP_CB_COMMAND_S | FXP_CB_COMMAND_I));
		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_cbt_prev = txs;

		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);

		/* +1 accounts for the trailing NOP descriptor. */
		sc->sc_cbt_cnt = cnt + 1;
	}
}

/*
 * Process interface interrupts.
 */
int
fxp_intr(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int16_t statack;
	bus_dmamap_t rxmap;
	int claimed = 0;
	int rnr = 0;

	/*
	 * If the interface isn't running, don't try to
	 * service the interrupt.. just ack it and bail.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS);
		if (statack) {
			claimed = 1;
			CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
			    statack & FXP_SCB_STATACK_MASK);
		}
		return claimed;
	}

	while ((statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS)) &
	    FXP_SCB_STATACK_MASK) {
		claimed = 1;
		rnr = (statack & (FXP_SCB_STATACK_RNR |
		    FXP_SCB_STATACK_SWI)) ? 1 : 0;
		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
		    statack & FXP_SCB_STATACK_MASK);

		/*
		 * Free any finished transmit mbuf chains.
		 */
		if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) {
			int txcnt = sc->sc_cbt_cnt;
			struct fxp_txsw *txs = sc->sc_cbt_cons;

			FXP_TXCB_SYNC(sc, txs,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			/*
			 * Walk forward over completed TxCBs (and the NOP
			 * trailers queued by fxp_start()), unloading and
			 * freeing their mbufs.
			 */
			while ((txcnt > 0) &&
			    ((txs->tx_cb->cb_status & htole16(FXP_CB_STATUS_C)) ||
			    (txs->tx_cb->cb_command & htole16(FXP_CB_COMMAND_NOP)))) {
				if (txs->tx_mbuf != NULL) {
					FXP_MBUF_SYNC(sc, txs->tx_map,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat,
					    txs->tx_map);
					m_freem(txs->tx_mbuf);
					txs->tx_mbuf = NULL;
				}
				--txcnt;
				txs = txs->tx_next;
				FXP_TXCB_SYNC(sc, txs,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			}
			sc->sc_cbt_cnt = txcnt;
			/* Did we transmit any packets? */
			if (sc->sc_cbt_cons != txs)
				ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_timer = sc->sc_cbt_cnt ? 5 : 0;
			sc->sc_cbt_cons = txs;

			if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
				/*
				 * Try to start more packets transmitting.
				 */
				fxp_start(ifp);
			}
		}
		/*
		 * Process receiver interrupts. If a Receive Unit
		 * not ready (RNR) condition exists, get whatever
		 * packets we can and re-start the receiver.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR |
		    FXP_SCB_STATACK_SWI)) {
			struct mbuf *m;
			u_int8_t *rfap;
rcvloop:
			m = sc->rfa_headm;
			/* RFA lives RFA_ALIGNMENT_FUDGE into the cluster. */
			rfap = m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
			rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
			bus_dmamap_sync(sc->sc_dmat, rxmap,
			    0, MCLBYTES, BUS_DMASYNC_POSTREAD |
			    BUS_DMASYNC_POSTWRITE);

			if (*(u_int16_t *)(rfap +
			    offsetof(struct fxp_rfa, rfa_status)) &
			    htole16(FXP_RFA_STATUS_C)) {
				if (*(u_int16_t *)(rfap +
				    offsetof(struct fxp_rfa, rfa_status)) &
				    htole16(FXP_RFA_STATUS_RNR))
					rnr = 1;

				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_headm = m->m_next;
				m->m_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					u_int16_t total_len;

					total_len = htole16(*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    actual_size))) &
					    (MCLBYTES - 1);
					if (total_len <
					    sizeof(struct ether_header)) {
						/* Runt: drop it. */
						m_freem(m);
						goto rcvloop;
					}
					if (*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    rfa_status)) &
					    htole16(FXP_RFA_STATUS_CRC)) {
						/* Bad CRC: drop it. */
						m_freem(m);
						goto rcvloop;
					}

					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len =
					    total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m,
						    BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */
					ether_input_mbuf(ifp, m);
				}
				goto rcvloop;
			}
		}
		if (rnr) {
			/* Restart the Receive Unit at the head RFA. */
			rxmap = *((bus_dmamap_t *)
			    sc->rfa_headm->m_ext.ext_buf);
			fxp_scb_wait(sc);
			CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
			    rxmap->dm_segs[0].ds_addr +
			    RFA_ALIGNMENT_FUDGE);
			fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);

		}
	}
	return (claimed);
}

/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
void
fxp_stats_update(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_stats *sp = &sc->sc_ctrl->stats;
	int s;

	/* Harvest the counters deposited by the previous dump-stats DMA. */
	FXP_STATS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	ifp->if_opackets += letoh32(sp->tx_good);
	ifp->if_collisions += letoh32(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += letoh32(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else if (sc->sc_flags & FXPF_RECV_WORKAROUND)
		sc->rx_idle_secs++;
	ifp->if_ierrors +=
	    letoh32(sp->rx_crc_errors) +
	    letoh32(sp->rx_alignment_errors) +
	    letoh32(sp->rx_rnr_errors) +
	    letoh32(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += letoh32(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splnet();
	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_init(sc);
		/* fxp_init() rearms the timeout; nothing more to do here. */
		splx(s);
		return;
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	FXP_STATS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (!(CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff)) {
		/*
		 * Start another stats dump.
		 */
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}

	/* Tick the MII clock. */
	mii_tick(&sc->sc_mii);

	splx(s);
	/*
	 * Schedule another timeout one second from now.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}

/*
 * Detach: undo fxp_attach().  Stops the chip, detaches PHYs and
 * media, tears down the ifnet and removes the power hook.
 */
void
fxp_detach(struct fxp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Get rid of our timeouts and mbufs */
	fxp_stop(sc, 1, 1);

	/* Detach any PHYs we might have. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	if (sc->sc_powerhook != NULL)
		powerhook_disestablish(sc->sc_powerhook);
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
 *
 * drain:    also free and re-allocate all receive buffers.
 * softonly: skip touching the hardware (no MII down, no chip reset);
 *           used when the device is going away or already reset.
 */
void
fxp_stop(struct fxp_softc *sc, int drain, int softonly)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	/*
	 * Cancel stats updater.
	 */
	timeout_del(&sc->stats_update_to);

	/*
	 * Turn down interface (done early to avoid bad interactions
	 * between panics, and the watchdog timer)
	 */
	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (!softonly)
		mii_down(&sc->sc_mii);

	/*
	 * Issue software reset.
	 */
	if (!softonly) {
		CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
		DELAY(10);
	}

	/*
	 * Release any xmit buffers.
	 */
	for (i = 0; i < FXP_NTXCB; i++) {
		if (sc->txs[i].tx_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->txs[i].tx_map);
			m_freem(sc->txs[i].tx_mbuf);
			sc->txs[i].tx_mbuf = NULL;
		}
	}
	sc->sc_cbt_cnt = 0;

	if (drain) {
		bus_dmamap_t rxmap;
		struct mbuf *m;

		/*
		 * Free all the receive buffers then reallocate/reinitialize
		 */
		m = sc->rfa_headm;
		while (m != NULL) {
			rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			FXP_RXMAP_PUT(sc, rxmap);
			m = m_free(m);
			sc->rx_bufs--;
		}
		sc->rfa_headm = NULL;
		sc->rfa_tailm = NULL;
		for (i = 0; i < FXP_NRFABUFS_MIN; i++) {
			if (fxp_add_rfabuf(sc, NULL) != 0) {
				/*
				 * This "can't happen" - we're at splnet()
				 * and we just freed all the buffers we need
				 * above.
				 */
				panic("fxp_stop: no buffers!");
			}
			sc->rx_bufs++;
		}
	}
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 */
void
fxp_watchdog(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	/* Full re-init resets the chip and restarts the rings. */
	fxp_init(sc);
}

/*
 * Submit a command to the i82557.
 */
void
fxp_scb_cmd(struct fxp_softc *sc, u_int16_t cmd)
{
	/* the caller is expected to have done fxp_scb_wait() first */
	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND, cmd);
}

/*
 * Initialize the chip: reset, load microcode, program the configuration
 * block, station address and multicast filter, set up the TxCB ring and
 * the RFA, and kick off the receiver and the stats updater.
 * Called with the softc as a void * so it can be used as a callback.
 */
void
fxp_init(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txp;
	bus_dmamap_t rxmap;
	int i, prm, save_bf, lrxen, allm, s, bufs;

	s = splnet();

	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc, 0, 0);

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);

#ifndef SMALL_KERNEL
	fxp_load_ucode(sc);
#endif
	/* Once through to set flags */
	fxp_mc_setup(sc, 0);

	/*
	 * In order to support receiving 802.1Q VLAN frames, we have to
	 * enable "save bad frames", since they are 4 bytes larger than
	 * the normal Ethernet maximum frame length. On i82558 and later,
	 * we have a better mechanism for this.
	 */
	save_bf = 0;
	lrxen = 0;

	if (sc->sc_revision >= FXP_REV_82558_A4)
		lrxen = 1;
	else
		save_bf = 1;

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, stats));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);

	cbp = &sc->sc_ctrl->u.cfg;
	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, (void *)&cbp->cb_status,
	    sizeof(fxp_cb_config_template));

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
	allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

#if 0
	/*
	 * Kept (disabled) as field-by-field documentation of the config
	 * block that the bit-twiddling in the #else branch manipulates.
	 */
	cbp->cb_status =	0;
	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
	cbp->link_addr =	0xffffffff;	/* (no) next command */
	cbp->byte_count =	22;		/* (22) bytes to config */
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_bce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->save_bf =		save_bf ? 1 : prm; /* save bad frames */
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->crscdt =		0;	/* (CRS only) */
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->long_rx =		lrxen;	/* (enable) long packets */
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		allm;
#else
	cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL);

	if (allm && !prm)
		cbp->mc_all |= 0x08;		/* accept all multicasts */
	else
		cbp->mc_all &= ~0x08;		/* reject all multicasts */

	if (prm) {
		cbp->promiscuous |= 1;		/* promiscuous mode */
		cbp->ctrl2 &= ~0x01;		/* save short packets */
		cbp->stripping &= ~0x01;	/* don't truncate rx packets */
	} else {
		cbp->promiscuous &= ~1;		/* no promiscuous mode */
		cbp->ctrl2 |= 0x01;		/* discard short packets */
		cbp->stripping |= 0x01;		/* truncate rx packets */
	}

	if (prm || save_bf)
		cbp->ctrl1 |= 0x80;		/* save bad frames */
	else
		cbp->ctrl1 &= ~0x80;		/* discard bad frames */

	if (sc->sc_flags & FXPF_MWI_ENABLE)
		cbp->ctrl0 |= 0x01;		/* enable PCI MWI command */

	if (!sc->phy_10Mbps_only)		/* interface mode */
		cbp->mediatype |= 0x01;
	else
		cbp->mediatype &= ~0x01;

	if (lrxen)				/* long packets */
		cbp->stripping |= 0x08;
	else
		cbp->stripping &= ~0x08;

	cbp->tx_dma_bytecount = 0;	/* (no) tx DMA max, dma_dce = 0 ??? */
	cbp->ctrl1 |= 0x08;		/* ci_int = 1 */
	cbp->ctrl3 |= 0x08;		/* nsai */
	cbp->fifo_limit = 0x08;		/* tx and rx fifo limit */
	cbp->fdx_pin |= 0x80;		/* Enable full duplex setting by pin */
#endif

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_CFG_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.cfg));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while ((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0 && i--);

	FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cbp->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: config command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Now initialize the station address.
	 */
	cb_ias = &sc->sc_ctrl->u.ias;
	cb_ias->cb_status = htole16(0);
	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
	cb_ias->link_addr = htole32(0xffffffff);
	bcopy(sc->sc_arpcom.ac_enaddr, (void *)cb_ias->macaddr,
	    sizeof(sc->sc_arpcom.ac_enaddr));

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_IAS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.ias));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C)) && i--);

	FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: IAS command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/* Again, this time really upload the multicast addresses */
	fxp_mc_setup(sc, 1);

	/*
	 * Initialize transmit control block (TxCB) list.
	 */
	bzero(sc->sc_ctrl->tx_cb, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
	txp = sc->sc_ctrl->tx_cb;
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
		/* link each TxCB to the next, wrapping via FXP_TXCB_MASK */
		txp[i].link_addr = htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[(i + 1) & FXP_TXCB_MASK]));
		txp[i].tbd_array_addr = htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[i].tbd[0]));
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	sc->sc_cbt_prev = sc->sc_cbt_prod = sc->sc_cbt_cons = sc->txs;
	sc->sc_cbt_cnt = 1;
	sc->sc_ctrl->tx_cb[0].cb_command = htole16(FXP_CB_COMMAND_NOP |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map, 0,
	    sc->tx_cb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, tx_cb[0]));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	if (ifp->if_flags & IFF_UP)
		bufs = FXP_NRFABUFS_MAX;
	else
		bufs = FXP_NRFABUFS_MIN;
	if (sc->rx_bufs > bufs) {
		/* too many buffers: free the excess from the head */
		while (sc->rfa_headm != NULL && sc->rx_bufs-- > bufs) {
			rxmap = *((bus_dmamap_t *)sc->rfa_headm->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			FXP_RXMAP_PUT(sc, rxmap);
			sc->rfa_headm = m_free(sc->rfa_headm);
		}
	} else if (sc->rx_bufs < bufs) {
		/* too few: create more dma maps, then add rfa buffers */
		int err, tmp_rx_bufs = sc->rx_bufs;
		for (i = sc->rx_bufs; i < bufs; i++) {
			if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
				printf("%s: unable to create rx dma map %d, "
				    "error %d\n", sc->sc_dev.dv_xname, i, err);
				break;
			}
			sc->rx_bufs++;
		}
		for (i = tmp_rx_bufs; i < sc->rx_bufs; i++)
			if (fxp_add_rfabuf(sc, NULL) != 0)
				break;
	}
	fxp_scb_wait(sc);

	/*
	 * Set current media.
	 */
	mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Request a software generated interrupt that will be used to
	 * (re)start the RU processing. If we direct the chip to start
	 * receiving from the start of queue now, instead of letting the
	 * interrupt handler first process all received packets, we run
	 * the risk of having it overwrite mbuf clusters while they are
	 * being processed or after they have been returned to the pool.
	 */
	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND,
	    CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) |
	    FXP_SCB_INTRCNTL_REQUEST_SWI);
	splx(s);

	/*
	 * Start stats updater.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}

/*
 * Change media according to request.
 */
int
fxp_mediachange(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	/* reset all PHYs before switching media, if more than one instance */
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(&sc->sc_mii);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
void
fxp_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct fxp_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) on to the end of the list -
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 */
int
fxp_add_rfabuf(struct fxp_softc *sc, struct mbuf *oldm)
{
	u_int32_t v;
	struct mbuf *m;
	u_int8_t *rfap;
	bus_dmamap_t rxmap = NULL;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* no cluster available; recycle oldm if we have one */
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
		}
		if (oldm == NULL) {
			/* brand-new buffer: grab a map, load the cluster */
			rxmap = FXP_RXMAP_GET(sc);
			*((bus_dmamap_t *)m->m_ext.ext_buf) = rxmap;
			bus_dmamap_load(sc->sc_dmat, rxmap,
			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
			    BUS_DMA_NOWAIT);
		} else if (oldm == m)
			/* recycled oldm: its map is already loaded */
			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
		else {
			/* move oldm's dma map over to the new cluster */
			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			bus_dmamap_load(sc->sc_dmat, rxmap,
			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
			    BUS_DMA_NOWAIT);
			*mtod(m, bus_dmamap_t *) = rxmap;
		}
	} else {
		/* no mbuf at all; fall back to recycling oldm */
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rxmap = *mtod(m, bus_dmamap_t *);
	}

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfap = m->m_data;
	m->m_data += sizeof(struct fxp_rfa);
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, size)) =
	    htole16(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE);

	/*
	 * Initialize the rest of the RFA. Note that since the RFA
	 * is misaligned, we cannot store values directly. Instead,
	 * we use an optimized, inline copy.
	 */
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_status)) = 0;
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) =
	    htole16(FXP_RFA_CONTROL_EL);
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, actual_size)) = 0;

	v = -1;
	fxp_lwcopy(&v,
	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
	fxp_lwcopy(&v,
	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, rbd_addr)));

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, MCLBYTES,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->rfa_headm != NULL) {
		sc->rfa_tailm->m_next = m;
		v = htole32(rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE);
		rfap = sc->rfa_tailm->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
		fxp_lwcopy(&v,
		    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
		/* clear EL on the old tail so the chip keeps going */
		*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) &=
		    htole16((u_int16_t)~FXP_RFA_CONTROL_EL);
		/* XXX we only need to sync the control struct */
		bus_dmamap_sync(sc->sc_dmat,
		    *((bus_dmamap_t *)sc->rfa_tailm->m_ext.ext_buf), 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else
		sc->rfa_headm = m;

	sc->rfa_tailm = m;

	/* non-zero (failure) exactly when we had to recycle oldm */
	return (m == oldm);
}

/*
 * Read a PHY register via the i82557 MDI control register.
 */
int
fxp_mdi_read(struct device *self, int phy, int reg)
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = FXP_CMD_TMO;
	int value;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	/* poll for the ready bit (bit 28) */
	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);

	return (value & 0xffff);
}

void
fxp_statchg(struct device *self)
{
	/* Nothing to do. */
}

/*
 * Write a PHY register via the i82557 MDI control register.
 */
void
fxp_mdi_write(struct device *self, int phy, int reg, int value)
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = FXP_CMD_TMO;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	/* poll for the ready bit (bit 28) */
	while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
}

/*
 * Handle interface ioctls: address/flags changes, media selection,
 * and multicast list updates (via ether_ioctl/ENETRESET).
 */
int
fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			fxp_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				fxp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fxp_stop(sc, 1, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/* a running interface that needs reprogramming is reinitialized */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			fxp_init(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this.
 * By requiring this, it allows us to keep up the performance of
 * the pre-initialized command ring (esp. link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called at splnet.
 */
void
fxp_mc_setup(struct fxp_softc *sc, int doit)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct fxp_cb_mcs *mcsp = &sc->sc_ctrl->u.mcs;
	struct ether_multistep step;
	struct ether_multi *enm;
	int i, nmcasts = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Fall back to ALLMULTI when the filter cannot represent the
	 * request: promiscuous mode, address ranges, or too many groups.
	 */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= MAXMCADDR) {
		ifp->if_flags |= IFF_ALLMULTI;
	} else {
		ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo,
			    (void *)&mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);

			nmcasts++;

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* doit == 0: only (re)compute the interface flags, don't touch hw */
	if (doit == 0)
		return;

	/*
	 * Initialize multicast setup descriptor.
	 */
	mcsp->cb_status = htole16(0);
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
	mcsp->link_addr = htole32(-1);
	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	for (i = FXP_CMD_TMO; (CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE && i--; DELAY(1));

	if ((CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE) {
		printf("%s: timeout waiting for CU ready\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	FXP_MCS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.mcs));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C)) && i--);

	FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: multicast command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

}

#ifndef SMALL_KERNEL
#include <dev/microcode/fxp/rcvbundl.h>
/*
 * Table mapping chip revisions to the CPU-saver microcode image to
 * load and the dword offsets of its tunable parameters (0 = absent).
 */
struct ucode {
	u_int16_t	revision;
	u_int16_t	int_delay_offset;
	u_int16_t	bundle_max_offset;
	u_int16_t	min_size_mask_offset;
	const char	*uname;
} const ucode_table[] = {
	{ FXP_REV_82558_A4, D101_CPUSAVER_DWORD,
	    0, 0,
	    "fxp-d101a" },

	{ FXP_REV_82558_B0, D101_CPUSAVER_DWORD,
	    0, 0,
	    "fxp-d101b0" },

	{ FXP_REV_82559_A0, D101M_CPUSAVER_DWORD,
	    D101M_CPUSAVER_BUNDLE_MAX_DWORD, D101M_CPUSAVER_MIN_SIZE_DWORD,
	    "fxp-d101ma" },

	{ FXP_REV_82559S_A, D101S_CPUSAVER_DWORD,
	    D101S_CPUSAVER_BUNDLE_MAX_DWORD, D101S_CPUSAVER_MIN_SIZE_DWORD,
	    "fxp-d101s" },

	{ FXP_REV_82550, D102_B_CPUSAVER_DWORD,
	    D102_B_CPUSAVER_BUNDLE_MAX_DWORD, D102_B_CPUSAVER_MIN_SIZE_DWORD,
	    "fxp-d102" },

	{ FXP_REV_82550_C, D102_C_CPUSAVER_DWORD,
D102_C_CPUSAVER_BUNDLE_MAX_DWORD, D102_C_CPUSAVER_MIN_SIZE_DWORD, 1808 "fxp-d102c" }, 1809 1810 { FXP_REV_82551_F, D102_E_CPUSAVER_DWORD, 1811 D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD, 1812 "fxp-d102e" }, 1813 1814 { FXP_REV_82551_10, D102_E_CPUSAVER_DWORD, 1815 D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD, 1816 "fxp-d102e" }, 1817 1818 { 0, 0, 1819 0, 0, 1820 NULL } 1821 }; 1822 1823 void 1824 fxp_load_ucode(struct fxp_softc *sc) 1825 { 1826 const struct ucode *uc; 1827 struct fxp_cb_ucode *cbp = &sc->sc_ctrl->u.code; 1828 int i, error; 1829 u_int32_t *ucode_buf; 1830 size_t ucode_len; 1831 1832 if (sc->sc_flags & FXPF_UCODE) 1833 return; 1834 1835 for (uc = ucode_table; uc->revision != 0; uc++) 1836 if (sc->sc_revision == uc->revision) 1837 break; 1838 if (uc->revision == NULL) 1839 return; /* no ucode for this chip is found */ 1840 1841 error = loadfirmware(uc->uname, (u_char **)&ucode_buf, &ucode_len); 1842 if (error) { 1843 printf("%s: error %d, could not read firmware %s\n", 1844 sc->sc_dev.dv_xname, error, uc->uname); 1845 sc->sc_flags |= FXPF_UCODE; 1846 return; 1847 } 1848 1849 cbp->cb_status = 0; 1850 cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE|FXP_CB_COMMAND_EL); 1851 cbp->link_addr = 0xffffffff; /* (no) next command */ 1852 for (i = 0; i < (ucode_len / sizeof(u_int32_t)); i++) 1853 cbp->ucode[i] = ucode_buf[i]; 1854 1855 if (uc->int_delay_offset) 1856 *((u_int16_t *)&cbp->ucode[uc->int_delay_offset]) = 1857 htole16(sc->sc_int_delay + sc->sc_int_delay / 2); 1858 1859 if (uc->bundle_max_offset) 1860 *((u_int16_t *)&cbp->ucode[uc->bundle_max_offset]) = 1861 htole16(sc->sc_bundle_max); 1862 1863 if (uc->min_size_mask_offset) 1864 *((u_int16_t *)&cbp->ucode[uc->min_size_mask_offset]) = 1865 htole16(sc->sc_min_size_mask); 1866 1867 FXP_UCODE_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1868 1869 /* 1870 * Download the ucode to the chip. 
1871 */ 1872 fxp_scb_wait(sc); 1873 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr 1874 + offsetof(struct fxp_ctrl, u.code)); 1875 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 1876 1877 /* ...and wait for it to complete. */ 1878 i = FXP_CMD_TMO; 1879 do { 1880 DELAY(2); 1881 FXP_UCODE_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1882 } while (((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0) && --i); 1883 if (i == 0) { 1884 printf("%s: timeout loading microcode\n", sc->sc_dev.dv_xname); 1885 free(ucode_buf, M_DEVBUF); 1886 return; 1887 } 1888 1889 #ifdef DEBUG 1890 printf("%s: microcode loaded, int_delay: %d usec", 1891 sc->sc_dev.dv_xname, sc->sc_int_delay); 1892 1893 if (uc->bundle_max_offset) 1894 printf(", bundle_max %d\n", sc->sc_bundle_max); 1895 else 1896 printf("\n"); 1897 #endif 1898 1899 free(ucode_buf, M_DEVBUF); 1900 sc->sc_flags |= FXPF_UCODE; 1901 } 1902 #endif /* SMALL_KERNEL */ 1903