1 /* $OpenBSD: fxp.c,v 1.130 2016/04/13 10:49:26 mpi Exp $ */ 2 /* $NetBSD: if_fxp.c,v 1.2 1997/06/05 02:01:55 thorpej Exp $ */ 3 4 /* 5 * Copyright (c) 1995, David Greenman 6 * All rights reserved. 7 * 8 * Modifications to support NetBSD: 9 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice unmodified, this list of conditions, and the following 16 * disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * Id: if_fxp.c,v 1.55 1998/08/04 08:53:12 dg Exp 34 */ 35 36 /* 37 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver 38 */ 39 40 #include "bpfilter.h" 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/kernel.h> 47 #include <sys/socket.h> 48 #include <sys/syslog.h> 49 #include <sys/timeout.h> 50 51 #include <net/if.h> 52 #include <net/if_media.h> 53 54 #include <netinet/in.h> 55 56 #if NBPFILTER > 0 57 #include <net/bpf.h> 58 #endif 59 60 #include <sys/ioctl.h> 61 #include <sys/errno.h> 62 #include <sys/device.h> 63 64 #include <netinet/if_ether.h> 65 66 #include <machine/cpu.h> 67 #include <machine/bus.h> 68 #include <machine/intr.h> 69 70 #include <dev/mii/miivar.h> 71 72 #include <dev/ic/fxpreg.h> 73 #include <dev/ic/fxpvar.h> 74 75 /* 76 * NOTE! On the Alpha, we have an alignment constraint. The 77 * card DMAs the packet immediately following the RFA. However, 78 * the first thing in the packet is a 14-byte Ethernet header. 79 * This means that the packet is misaligned. To compensate, 80 * we actually offset the RFA 2 bytes into the cluster. This 81 * aligns the packet after the Ethernet header at a 32-bit 82 * boundary. HOWEVER! This means that the RFA is misaligned! 83 */ 84 #define RFA_ALIGNMENT_FUDGE (2 + sizeof(bus_dmamap_t *)) 85 86 /* 87 * Inline function to copy a 16-bit aligned 32-bit quantity. 88 */ 89 static __inline void fxp_lwcopy(volatile u_int32_t *, 90 volatile u_int32_t *); 91 92 static __inline void 93 fxp_lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst) 94 { 95 volatile u_int16_t *a = (u_int16_t *)src; 96 volatile u_int16_t *b = (u_int16_t *)dst; 97 98 b[0] = a[0]; 99 b[1] = a[1]; 100 } 101 102 /* 103 * Template for default configuration parameters. 104 * See struct fxp_cb_config for the bit definitions. 105 * Note, cb_command is filled in later. 
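 * fxp_init() copies this template over the config CB, patches the individual
 * bytes according to the interface flags, and then issues the CONFIG command.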
106 */ 107 static u_char fxp_cb_config_template[] = { 108 0x0, 0x0, /* cb_status */ 109 0x0, 0x0, /* cb_command */ 110 0xff, 0xff, 0xff, 0xff, /* link_addr */ 111 0x16, /* 0 Byte count. */ 112 0x08, /* 1 Fifo limit */ 113 0x00, /* 2 Adaptive ifs */ 114 0x00, /* 3 ctrl0 */ 115 0x00, /* 4 rx_dma_bytecount */ 116 0x80, /* 5 tx_dma_bytecount */ 117 0xb2, /* 6 ctrl 1*/ 118 0x03, /* 7 ctrl 2*/ 119 0x01, /* 8 mediatype */ 120 0x00, /* 9 void2 */ 121 0x26, /* 10 ctrl3 */ 122 0x00, /* 11 linear priority */ 123 0x60, /* 12 interfrm_spacing */ 124 0x00, /* 13 void31 */ 125 0xf2, /* 14 void32 */ 126 0x48, /* 15 promiscuous */ 127 0x00, /* 16 void41 */ 128 0x40, /* 17 void42 */ 129 0xf3, /* 18 stripping */ 130 0x00, /* 19 fdx_pin */ 131 0x3f, /* 20 multi_ia */ 132 0x05 /* 21 mc_all */ 133 }; 134 135 void fxp_eeprom_shiftin(struct fxp_softc *, int, int); 136 void fxp_eeprom_putword(struct fxp_softc *, int, u_int16_t); 137 void fxp_write_eeprom(struct fxp_softc *, u_short *, int, int); 138 int fxp_mediachange(struct ifnet *); 139 void fxp_mediastatus(struct ifnet *, struct ifmediareq *); 140 void fxp_scb_wait(struct fxp_softc *); 141 void fxp_start(struct ifnet *); 142 int fxp_ioctl(struct ifnet *, u_long, caddr_t); 143 void fxp_load_ucode(struct fxp_softc *); 144 void fxp_watchdog(struct ifnet *); 145 int fxp_add_rfabuf(struct fxp_softc *, struct mbuf *); 146 int fxp_mdi_read(struct device *, int, int); 147 void fxp_mdi_write(struct device *, int, int, int); 148 void fxp_autosize_eeprom(struct fxp_softc *); 149 void fxp_statchg(struct device *); 150 void fxp_read_eeprom(struct fxp_softc *, u_int16_t *, 151 int, int); 152 void fxp_stats_update(void *); 153 void fxp_mc_setup(struct fxp_softc *, int); 154 void fxp_scb_cmd(struct fxp_softc *, u_int16_t); 155 156 /* 157 * Set initial transmit threshold at 64 (512 bytes). This is 158 * increased by 64 (512 bytes) at a time, to maximum of 192 159 * (1536 bytes), if an underrun occurs. 160 */ 161 static int tx_threshold = 64; 162 163 /* 164 * Interrupts coalescing code params 165 */ 166 int fxp_int_delay = FXP_INT_DELAY; 167 int fxp_bundle_max = FXP_BUNDLE_MAX; 168 int fxp_min_size_mask = FXP_MIN_SIZE_MASK; 169 170 /* 171 * TxCB list index mask. This is used to do list wrap-around. 172 */ 173 #define FXP_TXCB_MASK (FXP_NTXCB - 1) 174 175 /* 176 * Maximum number of seconds that the receiver can be idle before we 177 * assume it's dead and attempt to reset it by reprogramming the 178 * multicast filter. This is part of a work-around for a bug in the 179 * NIC. See fxp_stats_update(). 180 */ 181 #define FXP_MAX_RX_IDLE 15 182 183 /* 184 * Wait for the previous command to be accepted (but not necessarily 185 * completed). 186 */ 187 void 188 fxp_scb_wait(struct fxp_softc *sc) 189 { 190 int i = FXP_CMD_TMO; 191 192 while ((CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff) && --i) 193 DELAY(2); 194 if (i == 0) 195 printf("%s: warning: SCB timed out\n", sc->sc_dev.dv_xname); 196 } 197 198 void 199 fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length) 200 { 201 u_int16_t reg; 202 int x; 203 204 /* 205 * Shift in data. 
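 * Bits go out MSB first: EEDI carries the current bit while EESK is
 * pulsed high and then low for each position.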
206 */ 207 for (x = 1 << (length - 1); x; x >>= 1) { 208 if (data & x) 209 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 210 else 211 reg = FXP_EEPROM_EECS; 212 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 213 DELAY(1); 214 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); 215 DELAY(1); 216 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 217 DELAY(1); 218 } 219 } 220 221 void 222 fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data) 223 { 224 int i; 225 226 /* 227 * Erase/write enable. 228 */ 229 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 230 fxp_eeprom_shiftin(sc, 0x4, 3); 231 fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size); 232 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 233 DELAY(1); 234 /* 235 * Shift in write opcode, address, data. 236 */ 237 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 238 fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3); 239 fxp_eeprom_shiftin(sc, offset, sc->eeprom_size); 240 fxp_eeprom_shiftin(sc, data, 16); 241 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 242 DELAY(1); 243 /* 244 * Wait for EEPROM to finish up. 245 */ 246 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 247 DELAY(1); 248 for (i = 0; i < 1000; i++) { 249 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) 250 break; 251 DELAY(50); 252 } 253 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 254 DELAY(1); 255 /* 256 * Erase/write disable. 257 */ 258 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 259 fxp_eeprom_shiftin(sc, 0x4, 3); 260 fxp_eeprom_shiftin(sc, 0, sc->eeprom_size); 261 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 262 DELAY(1); 263 } 264 265 void 266 fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words) 267 { 268 int i; 269 270 for (i = 0; i < words; i++) 271 fxp_eeprom_putword(sc, offset + i, data[i]); 272 } 273 274 /************************************************************* 275 * Operating system-specific autoconfiguration glue 276 *************************************************************/ 277 278 struct cfdriver fxp_cd = { 279 NULL, "fxp", DV_IFNET 280 }; 281 282 int 283 fxp_activate(struct device *self, int act) 284 { 285 struct fxp_softc *sc = (struct fxp_softc *)self; 286 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 287 int rv = 0; 288 289 switch (act) { 290 case DVACT_SUSPEND: 291 if (ifp->if_flags & IFF_RUNNING) 292 fxp_stop(sc, 1, 0); 293 rv = config_activate_children(self, act); 294 break; 295 case DVACT_WAKEUP: 296 if (ifp->if_flags & IFF_UP) 297 fxp_wakeup(sc); 298 break; 299 default: 300 rv = config_activate_children(self, act); 301 break; 302 } 303 return (rv); 304 } 305 306 void 307 fxp_wakeup(struct fxp_softc *sc) 308 { 309 int s = splnet(); 310 311 /* force reload of the microcode */ 312 sc->sc_flags &= ~FXPF_UCODELOADED; 313 314 fxp_init(sc); 315 splx(s); 316 } 317 318 /************************************************************* 319 * End of operating system-specific autoconfiguration glue 320 *************************************************************/ 321 322 /* 323 * Do generic parts of attach. 324 */ 325 int 326 fxp_attach(struct fxp_softc *sc, const char *intrstr) 327 { 328 struct ifnet *ifp; 329 struct mbuf *m; 330 bus_dmamap_t rxmap; 331 u_int16_t data; 332 u_int8_t enaddr[6]; 333 int i, err; 334 335 /* 336 * Reset to a stable state. 
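 * A software reset through the PORT register quiesces the chip before
 * any of the DMA control structures are allocated.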
337 */ 338 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET); 339 DELAY(10); 340 341 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct fxp_ctrl), 342 PAGE_SIZE, 0, &sc->sc_cb_seg, 1, &sc->sc_cb_nseg, 343 BUS_DMA_NOWAIT | BUS_DMA_ZERO)) 344 goto fail; 345 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg, 346 sizeof(struct fxp_ctrl), (caddr_t *)&sc->sc_ctrl, 347 BUS_DMA_NOWAIT)) { 348 bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg); 349 goto fail; 350 } 351 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct fxp_ctrl), 352 1, sizeof(struct fxp_ctrl), 0, BUS_DMA_NOWAIT, 353 &sc->tx_cb_map)) { 354 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl, 355 sizeof(struct fxp_ctrl)); 356 bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg); 357 goto fail; 358 } 359 if (bus_dmamap_load(sc->sc_dmat, sc->tx_cb_map, (caddr_t)sc->sc_ctrl, 360 sizeof(struct fxp_ctrl), NULL, BUS_DMA_NOWAIT)) { 361 bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map); 362 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl, 363 sizeof(struct fxp_ctrl)); 364 bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg); 365 goto fail; 366 } 367 368 for (i = 0; i < FXP_NTXCB; i++) { 369 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 370 FXP_NTXSEG, MCLBYTES, 0, 0, &sc->txs[i].tx_map)) != 0) { 371 printf("%s: unable to create tx dma map %d, error %d\n", 372 sc->sc_dev.dv_xname, i, err); 373 goto fail; 374 } 375 sc->txs[i].tx_mbuf = NULL; 376 sc->txs[i].tx_cb = sc->sc_ctrl->tx_cb + i; 377 sc->txs[i].tx_off = offsetof(struct fxp_ctrl, tx_cb[i]); 378 sc->txs[i].tx_next = &sc->txs[(i + 1) & FXP_TXCB_MASK]; 379 } 380 381 /* 382 * Pre-allocate some receive buffers. 383 */ 384 sc->sc_rxfree = 0; 385 for (i = 0; i < FXP_NRFABUFS_MIN; i++) { 386 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 387 MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) { 388 printf("%s: unable to create rx dma map %d, error %d\n", 389 sc->sc_dev.dv_xname, i, err); 390 goto fail; 391 } 392 sc->rx_bufs++; 393 } 394 for (i = 0; i < FXP_NRFABUFS_MIN; i++) 395 if (fxp_add_rfabuf(sc, NULL) != 0) 396 goto fail; 397 398 /* 399 * Find out how large of an SEEPROM we have. 400 */ 401 fxp_autosize_eeprom(sc); 402 403 /* 404 * Get info about the primary PHY 405 */ 406 fxp_read_eeprom(sc, (u_int16_t *)&data, FXP_EEPROM_REG_PHY, 1); 407 sc->phy_primary_addr = data & 0xff; 408 sc->phy_primary_device = (data >> 8) & 0x3f; 409 sc->phy_10Mbps_only = data >> 15; 410 411 /* 412 * Only 82558 and newer cards can do this. 413 */ 414 if (sc->sc_revision >= FXP_REV_82558_A4) { 415 sc->sc_int_delay = fxp_int_delay; 416 sc->sc_bundle_max = fxp_bundle_max; 417 sc->sc_min_size_mask = fxp_min_size_mask; 418 } 419 /* 420 * Read MAC address. 
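 * The station address occupies three 16-bit words starting at
 * FXP_EEPROM_REG_MAC.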
421 */ 422 fxp_read_eeprom(sc, (u_int16_t *)enaddr, FXP_EEPROM_REG_MAC, 3); 423 424 ifp = &sc->sc_arpcom.ac_if; 425 bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 426 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 427 ifp->if_softc = sc; 428 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 429 ifp->if_ioctl = fxp_ioctl; 430 ifp->if_start = fxp_start; 431 ifp->if_watchdog = fxp_watchdog; 432 IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1); 433 434 ifp->if_capabilities = IFCAP_VLAN_MTU; 435 436 printf(": %s, address %s\n", intrstr, 437 ether_sprintf(sc->sc_arpcom.ac_enaddr)); 438 439 if (sc->sc_flags & FXPF_DISABLE_STANDBY) { 440 fxp_read_eeprom(sc, &data, FXP_EEPROM_REG_ID, 1); 441 if (data & FXP_EEPROM_REG_ID_STB) { 442 u_int16_t cksum; 443 444 printf("%s: Disabling dynamic standby mode in EEPROM", 445 sc->sc_dev.dv_xname); 446 data &= ~FXP_EEPROM_REG_ID_STB; 447 fxp_write_eeprom(sc, &data, FXP_EEPROM_REG_ID, 1); 448 printf(", New ID 0x%x", data); 449 cksum = 0; 450 for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) { 451 fxp_read_eeprom(sc, &data, i, 1); 452 cksum += data; 453 } 454 i = (1 << sc->eeprom_size) - 1; 455 cksum = 0xBABA - cksum; 456 fxp_read_eeprom(sc, &data, i, 1); 457 fxp_write_eeprom(sc, &cksum, i, 1); 458 printf(", cksum @ 0x%x: 0x%x -> 0x%x\n", 459 i, data, cksum); 460 } 461 } 462 463 /* Receiver lock-up workaround detection. */ 464 fxp_read_eeprom(sc, &data, FXP_EEPROM_REG_COMPAT, 1); 465 if ((data & (FXP_EEPROM_REG_COMPAT_MC10|FXP_EEPROM_REG_COMPAT_MC100)) 466 != (FXP_EEPROM_REG_COMPAT_MC10|FXP_EEPROM_REG_COMPAT_MC100)) 467 sc->sc_flags |= FXPF_RECV_WORKAROUND; 468 469 /* 470 * Initialize our media structures and probe the MII. 471 */ 472 sc->sc_mii.mii_ifp = ifp; 473 sc->sc_mii.mii_readreg = fxp_mdi_read; 474 sc->sc_mii.mii_writereg = fxp_mdi_write; 475 sc->sc_mii.mii_statchg = fxp_statchg; 476 ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mediachange, 477 fxp_mediastatus); 478 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 479 MII_OFFSET_ANY, MIIF_NOISOLATE); 480 /* If no phy found, just use auto mode */ 481 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 482 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 483 0, NULL); 484 printf("%s: no phy found, using manual mode\n", 485 sc->sc_dev.dv_xname); 486 } 487 488 if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0)) 489 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); 490 else if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0)) 491 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 492 else 493 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T); 494 495 /* 496 * Attach the interface. 497 */ 498 if_attach(ifp); 499 ether_ifattach(ifp); 500 501 /* 502 * Initialize timeout for statistics update. 
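 * fxp_stats_update() re-arms this timeout itself, so statistics are
 * dumped once a second while the interface is running.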
503 */ 504 timeout_set(&sc->stats_update_to, fxp_stats_update, sc); 505 506 return (0); 507 508 fail: 509 printf("%s: Failed to malloc memory\n", sc->sc_dev.dv_xname); 510 if (sc->tx_cb_map != NULL) { 511 bus_dmamap_unload(sc->sc_dmat, sc->tx_cb_map); 512 bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map); 513 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl, 514 sizeof(struct fxp_cb_tx) * FXP_NTXCB); 515 bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg); 516 } 517 m = sc->rfa_headm; 518 while (m != NULL) { 519 rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf); 520 bus_dmamap_unload(sc->sc_dmat, rxmap); 521 FXP_RXMAP_PUT(sc, rxmap); 522 m = m_free(m); 523 } 524 return (ENOMEM); 525 } 526 527 /* 528 * From NetBSD: 529 * 530 * Figure out EEPROM size. 531 * 532 * 559's can have either 64-word or 256-word EEPROMs, the 558 533 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet 534 * talks about the existence of 16 to 256 word EEPROMs. 535 * 536 * The only known sizes are 64 and 256, where the 256 version is used 537 * by CardBus cards to store CIS information. 538 * 539 * The address is shifted in msb-to-lsb, and after the last 540 * address-bit the EEPROM is supposed to output a `dummy zero' bit, 541 * after which follows the actual data. We try to detect this zero, by 542 * probing the data-out bit in the EEPROM control register just after 543 * having shifted in a bit. If the bit is zero, we assume we've 544 * shifted enough address bits. The data-out should be tri-state, 545 * before this, which should translate to a logical one. 546 * 547 * Other ways to do this would be to try to read a register with known 548 * contents with a varying number of address bits, but no such 549 * register seem to be available. The high bits of register 10 are 01 550 * on the 558 and 559, but apparently not on the 557. 551 * 552 * The Linux driver computes a checksum on the EEPROM data, but the 553 * value of this checksum is not very well documented. 554 */ 555 void 556 fxp_autosize_eeprom(struct fxp_softc *sc) 557 { 558 u_int16_t reg; 559 int x; 560 561 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 562 /* 563 * Shift in read opcode. 564 */ 565 for (x = 3; x > 0; x--) { 566 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { 567 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 568 } else { 569 reg = FXP_EEPROM_EECS; 570 } 571 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 572 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 573 reg | FXP_EEPROM_EESK); 574 DELAY(4); 575 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 576 DELAY(4); 577 } 578 /* 579 * Shift in address. 580 * Wait for the dummy zero following a correct address shift. 581 */ 582 for (x = 1; x <= 8; x++) { 583 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 584 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 585 FXP_EEPROM_EECS | FXP_EEPROM_EESK); 586 DELAY(4); 587 if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0) 588 break; 589 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 590 DELAY(4); 591 } 592 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 593 DELAY(4); 594 sc->eeprom_size = x; 595 } 596 597 /* 598 * Read from the serial EEPROM. Basically, you manually shift in 599 * the read opcode (one bit at a time) and then shift in the address, 600 * and then you shift out the data (all of this one bit at a time). 601 * The word size is 16 bits, so you have to provide the address for 602 * every 16 bits of data. 
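 * The EECS, EESK, EEDI and EEDO bits of the EEPROM control register are
 * driven directly to implement the serial protocol.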
603 */ 604 void 605 fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, 606 int words) 607 { 608 u_int16_t reg; 609 int i, x; 610 611 for (i = 0; i < words; i++) { 612 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 613 /* 614 * Shift in read opcode. 615 */ 616 for (x = 3; x > 0; x--) { 617 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { 618 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 619 } else { 620 reg = FXP_EEPROM_EECS; 621 } 622 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 623 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 624 reg | FXP_EEPROM_EESK); 625 DELAY(4); 626 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 627 DELAY(4); 628 } 629 /* 630 * Shift in address. 631 */ 632 for (x = sc->eeprom_size; x > 0; x--) { 633 if ((i + offset) & (1 << (x - 1))) { 634 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 635 } else { 636 reg = FXP_EEPROM_EECS; 637 } 638 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 639 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 640 reg | FXP_EEPROM_EESK); 641 DELAY(4); 642 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 643 DELAY(4); 644 } 645 reg = FXP_EEPROM_EECS; 646 data[i] = 0; 647 /* 648 * Shift out data. 649 */ 650 for (x = 16; x > 0; x--) { 651 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 652 reg | FXP_EEPROM_EESK); 653 DELAY(4); 654 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & 655 FXP_EEPROM_EEDO) 656 data[i] |= (1 << (x - 1)); 657 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 658 DELAY(4); 659 } 660 data[i] = letoh16(data[i]); 661 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 662 DELAY(4); 663 } 664 } 665 666 /* 667 * Start packet transmission on the interface. 668 */ 669 void 670 fxp_start(struct ifnet *ifp) 671 { 672 struct fxp_softc *sc = ifp->if_softc; 673 struct fxp_txsw *txs = sc->sc_cbt_prod; 674 struct fxp_cb_tx *txc; 675 struct mbuf *m0; 676 int cnt = sc->sc_cbt_cnt, seg, error; 677 678 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) 679 return; 680 681 while (1) { 682 if (cnt >= (FXP_NTXCB - 2)) { 683 ifq_set_oactive(&ifp->if_snd); 684 break; 685 } 686 687 txs = txs->tx_next; 688 689 m0 = ifq_dequeue(&ifp->if_snd); 690 if (m0 == NULL) 691 break; 692 693 error = bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map, 694 m0, BUS_DMA_NOWAIT); 695 switch (error) { 696 case 0: 697 break; 698 case EFBIG: 699 if (m_defrag(m0, M_DONTWAIT) == 0 && 700 bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map, 701 m0, BUS_DMA_NOWAIT) == 0) 702 break; 703 /* FALLTHROUGH */ 704 default: 705 ifp->if_oerrors++; 706 m_freem(m0); 707 /* try next packet */ 708 continue; 709 } 710 711 txs->tx_mbuf = m0; 712 713 #if NBPFILTER > 0 714 if (ifp->if_bpf) 715 bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT); 716 #endif 717 718 FXP_MBUF_SYNC(sc, txs->tx_map, BUS_DMASYNC_PREWRITE); 719 720 txc = txs->tx_cb; 721 txc->tbd_number = txs->tx_map->dm_nsegs; 722 txc->cb_status = 0; 723 txc->cb_command = htole16(FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF); 724 txc->tx_threshold = tx_threshold; 725 for (seg = 0; seg < txs->tx_map->dm_nsegs; seg++) { 726 txc->tbd[seg].tb_addr = 727 htole32(txs->tx_map->dm_segs[seg].ds_addr); 728 txc->tbd[seg].tb_size = 729 htole32(txs->tx_map->dm_segs[seg].ds_len); 730 } 731 FXP_TXCB_SYNC(sc, txs, 732 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 733 734 ++cnt; 735 sc->sc_cbt_prod = txs; 736 } 737 738 if (cnt != sc->sc_cbt_cnt) { 739 /* We enqueued at least one. 
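Arm the watchdog and chain a suspend/NOP CB after the frames just queued; the previous suspend point is cleared and the CU resumed.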
*/ 740 ifp->if_timer = 5; 741 742 txs = sc->sc_cbt_prod; 743 txs = txs->tx_next; 744 sc->sc_cbt_prod = txs; 745 txs->tx_cb->cb_command = 746 htole16(FXP_CB_COMMAND_I | FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S); 747 FXP_TXCB_SYNC(sc, txs, 748 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 749 750 FXP_TXCB_SYNC(sc, sc->sc_cbt_prev, 751 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 752 sc->sc_cbt_prev->tx_cb->cb_command &= 753 htole16(~(FXP_CB_COMMAND_S | FXP_CB_COMMAND_I)); 754 FXP_TXCB_SYNC(sc, sc->sc_cbt_prev, 755 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 756 757 sc->sc_cbt_prev = txs; 758 759 fxp_scb_wait(sc); 760 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); 761 762 sc->sc_cbt_cnt = cnt + 1; 763 } 764 } 765 766 /* 767 * Process interface interrupts. 768 */ 769 int 770 fxp_intr(void *arg) 771 { 772 struct fxp_softc *sc = arg; 773 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 774 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 775 u_int16_t statack; 776 bus_dmamap_t rxmap; 777 int claimed = 0; 778 int rnr = 0; 779 780 /* 781 * If the interface isn't running, don't try to 782 * service the interrupt.. just ack it and bail. 783 */ 784 if ((ifp->if_flags & IFF_RUNNING) == 0) { 785 statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS); 786 if (statack) { 787 claimed = 1; 788 CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS, 789 statack & FXP_SCB_STATACK_MASK); 790 } 791 return claimed; 792 } 793 794 while ((statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS)) & 795 FXP_SCB_STATACK_MASK) { 796 claimed = 1; 797 rnr = (statack & (FXP_SCB_STATACK_RNR | 798 FXP_SCB_STATACK_SWI)) ? 1 : 0; 799 /* 800 * First ACK all the interrupts in this pass. 801 */ 802 CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS, 803 statack & FXP_SCB_STATACK_MASK); 804 805 /* 806 * Free any finished transmit mbuf chains. 807 */ 808 if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) { 809 int txcnt = sc->sc_cbt_cnt; 810 struct fxp_txsw *txs = sc->sc_cbt_cons; 811 812 FXP_TXCB_SYNC(sc, txs, 813 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 814 815 while ((txcnt > 0) && 816 ((txs->tx_cb->cb_status & htole16(FXP_CB_STATUS_C)) || 817 (txs->tx_cb->cb_command & htole16(FXP_CB_COMMAND_NOP)))) { 818 if (txs->tx_mbuf != NULL) { 819 FXP_MBUF_SYNC(sc, txs->tx_map, 820 BUS_DMASYNC_POSTWRITE); 821 bus_dmamap_unload(sc->sc_dmat, 822 txs->tx_map); 823 m_freem(txs->tx_mbuf); 824 txs->tx_mbuf = NULL; 825 } 826 --txcnt; 827 txs = txs->tx_next; 828 FXP_TXCB_SYNC(sc, txs, 829 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 830 } 831 sc->sc_cbt_cnt = txcnt; 832 /* Did we transmit any packets? */ 833 if (sc->sc_cbt_cons != txs) 834 ifq_clr_oactive(&ifp->if_snd); 835 ifp->if_timer = sc->sc_cbt_cnt ? 5 : 0; 836 sc->sc_cbt_cons = txs; 837 838 if (!IFQ_IS_EMPTY(&ifp->if_snd)) { 839 /* 840 * Try to start more packets transmitting. 841 */ 842 fxp_start(ifp); 843 } 844 } 845 /* 846 * Process receiver interrupts. If a Receive Unit 847 * not ready (RNR) condition exists, get whatever 848 * packets we can and re-start the receiver. 
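 * Completed RFAs are unlinked from the head of the chain, replaced via
 * fxp_add_rfabuf() and handed to if_input() through the mbuf list.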
849 */ 850 if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR | 851 FXP_SCB_STATACK_SWI)) { 852 struct mbuf *m; 853 u_int8_t *rfap; 854 rcvloop: 855 m = sc->rfa_headm; 856 rfap = m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE; 857 rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf); 858 bus_dmamap_sync(sc->sc_dmat, rxmap, 859 0, MCLBYTES, BUS_DMASYNC_POSTREAD | 860 BUS_DMASYNC_POSTWRITE); 861 862 if (*(u_int16_t *)(rfap + 863 offsetof(struct fxp_rfa, rfa_status)) & 864 htole16(FXP_RFA_STATUS_C)) { 865 if (*(u_int16_t *)(rfap + 866 offsetof(struct fxp_rfa, rfa_status)) & 867 htole16(FXP_RFA_STATUS_RNR)) 868 rnr = 1; 869 870 /* 871 * Remove first packet from the chain. 872 */ 873 sc->rfa_headm = m->m_next; 874 m->m_next = NULL; 875 876 /* 877 * Add a new buffer to the receive chain. 878 * If this fails, the old buffer is recycled 879 * instead. 880 */ 881 if (fxp_add_rfabuf(sc, m) == 0) { 882 u_int16_t total_len; 883 884 total_len = htole16(*(u_int16_t *)(rfap + 885 offsetof(struct fxp_rfa, 886 actual_size))) & 887 (MCLBYTES - 1); 888 if (total_len < 889 sizeof(struct ether_header)) { 890 m_freem(m); 891 goto rcvloop; 892 } 893 if (*(u_int16_t *)(rfap + 894 offsetof(struct fxp_rfa, 895 rfa_status)) & 896 htole16(FXP_RFA_STATUS_CRC)) { 897 m_freem(m); 898 goto rcvloop; 899 } 900 901 m->m_pkthdr.len = m->m_len = total_len; 902 ml_enqueue(&ml, m); 903 } 904 goto rcvloop; 905 } 906 } 907 if (rnr) { 908 rxmap = *((bus_dmamap_t *) 909 sc->rfa_headm->m_ext.ext_buf); 910 fxp_scb_wait(sc); 911 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 912 rxmap->dm_segs[0].ds_addr + 913 RFA_ALIGNMENT_FUDGE); 914 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); 915 916 } 917 } 918 919 if_input(ifp, &ml); 920 921 return (claimed); 922 } 923 924 /* 925 * Update packet in/out/collision statistics. The i82557 doesn't 926 * allow you to access these counters without doing a fairly 927 * expensive DMA to get _all_ of the statistics it maintains, so 928 * we do this operation here only once per second. The statistics 929 * counters in the kernel are updated from the previous dump-stats 930 * DMA and then a new dump-stats DMA is started. The on-chip 931 * counters are zeroed when the DMA completes. If we can't start 932 * the DMA immediately, we don't wait - we just prepare to read 933 * them again next time. 934 */ 935 void 936 fxp_stats_update(void *arg) 937 { 938 struct fxp_softc *sc = arg; 939 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 940 struct fxp_stats *sp = &sc->sc_ctrl->stats; 941 int s; 942 943 FXP_STATS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 944 ifp->if_opackets += letoh32(sp->tx_good); 945 ifp->if_collisions += letoh32(sp->tx_total_collisions); 946 if (sp->rx_good) { 947 sc->rx_idle_secs = 0; 948 } else if (sc->sc_flags & FXPF_RECV_WORKAROUND) 949 sc->rx_idle_secs++; 950 ifp->if_ierrors += 951 letoh32(sp->rx_crc_errors) + 952 letoh32(sp->rx_alignment_errors) + 953 letoh32(sp->rx_rnr_errors) + 954 letoh32(sp->rx_overrun_errors); 955 /* 956 * If any transmit underruns occurred, bump up the transmit 957 * threshold by another 512 bytes (64 * 8). 958 */ 959 if (sp->tx_underruns) { 960 ifp->if_oerrors += letoh32(sp->tx_underruns); 961 if (tx_threshold < 192) 962 tx_threshold += 64; 963 } 964 s = splnet(); 965 /* 966 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds, 967 * then assume the receiver has locked up and attempt to clear 968 * the condition by reprogramming the multicast filter. 
This is 969 * a work-around for a bug in the 82557 where the receiver locks 970 * up if it gets certain types of garbage in the synchronization 971 * bits prior to the packet header. This bug is supposed to only 972 * occur in 10Mbps mode, but has been seen to occur in 100Mbps 973 * mode as well (perhaps due to a 10/100 speed transition). 974 */ 975 if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) { 976 sc->rx_idle_secs = 0; 977 fxp_init(sc); 978 splx(s); 979 return; 980 } 981 /* 982 * If there is no pending command, start another stats 983 * dump. Otherwise punt for now. 984 */ 985 FXP_STATS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 986 if (!(CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff)) { 987 /* 988 * Start another stats dump. 989 */ 990 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET); 991 } else { 992 /* 993 * A previous command is still waiting to be accepted. 994 * Just zero our copy of the stats and wait for the 995 * next timer event to update them. 996 */ 997 sp->tx_good = 0; 998 sp->tx_underruns = 0; 999 sp->tx_total_collisions = 0; 1000 1001 sp->rx_good = 0; 1002 sp->rx_crc_errors = 0; 1003 sp->rx_alignment_errors = 0; 1004 sp->rx_rnr_errors = 0; 1005 sp->rx_overrun_errors = 0; 1006 } 1007 1008 /* Tick the MII clock. */ 1009 mii_tick(&sc->sc_mii); 1010 1011 splx(s); 1012 /* 1013 * Schedule another timeout one second from now. 1014 */ 1015 timeout_add_sec(&sc->stats_update_to, 1); 1016 } 1017 1018 void 1019 fxp_detach(struct fxp_softc *sc) 1020 { 1021 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1022 1023 /* Get rid of our timeouts and mbufs */ 1024 fxp_stop(sc, 1, 1); 1025 1026 /* Detach any PHYs we might have. */ 1027 if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL) 1028 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 1029 1030 /* Delete any remaining media. */ 1031 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 1032 1033 ether_ifdetach(ifp); 1034 if_detach(ifp); 1035 1036 #ifndef SMALL_KERNEL 1037 if (sc->sc_ucodebuf) 1038 free(sc->sc_ucodebuf, M_DEVBUF, sc->sc_ucodelen); 1039 #endif 1040 } 1041 1042 /* 1043 * Stop the interface. Cancels the statistics updater and resets 1044 * the interface. 1045 */ 1046 void 1047 fxp_stop(struct fxp_softc *sc, int drain, int softonly) 1048 { 1049 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1050 int i; 1051 1052 /* 1053 * Cancel stats updater. 1054 */ 1055 timeout_del(&sc->stats_update_to); 1056 1057 /* 1058 * Turn down interface (done early to avoid bad interactions 1059 * between panics, and the watchdog timer) 1060 */ 1061 ifp->if_timer = 0; 1062 ifp->if_flags &= ~IFF_RUNNING; 1063 ifq_clr_oactive(&ifp->if_snd); 1064 1065 if (!softonly) 1066 mii_down(&sc->sc_mii); 1067 1068 /* 1069 * Issue software reset. 1070 */ 1071 if (!softonly) { 1072 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 1073 DELAY(10); 1074 } 1075 1076 /* 1077 * Release any xmit buffers. 
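 * mbufs still attached to TxCBs are unloaded and freed; fxp_init()
 * rebuilds the TxCB ring before the chip is restarted.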
1078 */ 1079 for (i = 0; i < FXP_NTXCB; i++) { 1080 if (sc->txs[i].tx_mbuf != NULL) { 1081 bus_dmamap_unload(sc->sc_dmat, sc->txs[i].tx_map); 1082 m_freem(sc->txs[i].tx_mbuf); 1083 sc->txs[i].tx_mbuf = NULL; 1084 } 1085 } 1086 sc->sc_cbt_cnt = 0; 1087 1088 if (drain) { 1089 bus_dmamap_t rxmap; 1090 struct mbuf *m; 1091 1092 /* 1093 * Free all the receive buffers then reallocate/reinitialize 1094 */ 1095 m = sc->rfa_headm; 1096 while (m != NULL) { 1097 rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf); 1098 bus_dmamap_unload(sc->sc_dmat, rxmap); 1099 FXP_RXMAP_PUT(sc, rxmap); 1100 m = m_free(m); 1101 sc->rx_bufs--; 1102 } 1103 sc->rfa_headm = NULL; 1104 sc->rfa_tailm = NULL; 1105 for (i = 0; i < FXP_NRFABUFS_MIN; i++) { 1106 if (fxp_add_rfabuf(sc, NULL) != 0) { 1107 /* 1108 * This "can't happen" - we're at splnet() 1109 * and we just freed all the buffers we need 1110 * above. 1111 */ 1112 panic("fxp_stop: no buffers!"); 1113 } 1114 sc->rx_bufs++; 1115 } 1116 } 1117 } 1118 1119 /* 1120 * Watchdog/transmission transmit timeout handler. Called when a 1121 * transmission is started on the interface, but no interrupt is 1122 * received before the timeout. This usually indicates that the 1123 * card has wedged for some reason. 1124 */ 1125 void 1126 fxp_watchdog(struct ifnet *ifp) 1127 { 1128 struct fxp_softc *sc = ifp->if_softc; 1129 1130 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname); 1131 ifp->if_oerrors++; 1132 1133 fxp_init(sc); 1134 } 1135 1136 /* 1137 * Submit a command to the i82557. 1138 */ 1139 void 1140 fxp_scb_cmd(struct fxp_softc *sc, u_int16_t cmd) 1141 { 1142 CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND, cmd); 1143 } 1144 1145 void 1146 fxp_init(void *xsc) 1147 { 1148 struct fxp_softc *sc = xsc; 1149 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1150 struct fxp_cb_config *cbp; 1151 struct fxp_cb_ias *cb_ias; 1152 struct fxp_cb_tx *txp; 1153 bus_dmamap_t rxmap; 1154 int i, prm, save_bf, lrxen, allm, bufs; 1155 1156 splassert(IPL_NET); 1157 1158 /* 1159 * Cancel any pending I/O 1160 */ 1161 fxp_stop(sc, 0, 0); 1162 1163 /* 1164 * Initialize base of CBL and RFA memory. Loading with zero 1165 * sets it up for regular linear addressing. 1166 */ 1167 fxp_scb_wait(sc); 1168 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); 1169 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE); 1170 1171 fxp_scb_wait(sc); 1172 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); 1173 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE); 1174 1175 #ifndef SMALL_KERNEL 1176 fxp_load_ucode(sc); 1177 #endif 1178 /* Once through to set flags */ 1179 fxp_mc_setup(sc, 0); 1180 1181 /* 1182 * In order to support receiving 802.1Q VLAN frames, we have to 1183 * enable "save bad frames", since they are 4 bytes larger than 1184 * the normal Ethernet maximum frame length. On i82558 and later, 1185 * we have a better mechanism for this. 1186 */ 1187 save_bf = 0; 1188 lrxen = 0; 1189 1190 if (sc->sc_revision >= FXP_REV_82558_A4) 1191 lrxen = 1; 1192 else 1193 save_bf = 1; 1194 1195 /* 1196 * Initialize base of dump-stats buffer. 1197 */ 1198 fxp_scb_wait(sc); 1199 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 1200 sc->tx_cb_map->dm_segs->ds_addr + 1201 offsetof(struct fxp_ctrl, stats)); 1202 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR); 1203 1204 cbp = &sc->sc_ctrl->u.cfg; 1205 /* 1206 * This bcopy is kind of disgusting, but there are a bunch of must be 1207 * zero and must be one bits in this structure and this is the easiest 1208 * way to initialize them all to proper values. 
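 * The bits patched below depend on IFF_PROMISC, IFF_ALLMULTI, the chip
 * revision (save_bf vs. long_rx) and FXPF_MWI_ENABLE.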
1209 */ 1210 bcopy(fxp_cb_config_template, (void *)&cbp->cb_status, 1211 sizeof(fxp_cb_config_template)); 1212 1213 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; 1214 allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; 1215 1216 #if 0 1217 cbp->cb_status = 0; 1218 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL; 1219 cbp->link_addr = 0xffffffff; /* (no) next command */ 1220 cbp->byte_count = 22; /* (22) bytes to config */ 1221 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ 1222 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ 1223 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ 1224 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ 1225 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ 1226 cbp->dma_bce = 0; /* (disable) dma max counters */ 1227 cbp->late_scb = 0; /* (don't) defer SCB update */ 1228 cbp->tno_int = 0; /* (disable) tx not okay interrupt */ 1229 cbp->ci_int = 1; /* interrupt on CU idle */ 1230 cbp->save_bf = save_bf ? 1 : prm; /* save bad frames */ 1231 cbp->disc_short_rx = !prm; /* discard short packets */ 1232 cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */ 1233 cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */ 1234 cbp->nsai = 1; /* (don't) disable source addr insert */ 1235 cbp->preamble_length = 2; /* (7 byte) preamble */ 1236 cbp->loopback = 0; /* (don't) loopback */ 1237 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ 1238 cbp->linear_pri_mode = 0; /* (wait after xmit only) */ 1239 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ 1240 cbp->promiscuous = prm; /* promiscuous mode */ 1241 cbp->bcast_disable = 0; /* (don't) disable broadcasts */ 1242 cbp->crscdt = 0; /* (CRS only) */ 1243 cbp->stripping = !prm; /* truncate rx packet to byte count */ 1244 cbp->padding = 1; /* (do) pad short tx packets */ 1245 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ 1246 cbp->long_rx = lrxen; /* (enable) long packets */ 1247 cbp->force_fdx = 0; /* (don't) force full duplex */ 1248 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ 1249 cbp->multi_ia = 0; /* (don't) accept multiple IAs */ 1250 cbp->mc_all = allm; 1251 #else 1252 cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL); 1253 1254 if (allm && !prm) 1255 cbp->mc_all |= 0x08; /* accept all multicasts */ 1256 else 1257 cbp->mc_all &= ~0x08; /* reject all multicasts */ 1258 1259 if (prm) { 1260 cbp->promiscuous |= 1; /* promiscuous mode */ 1261 cbp->ctrl2 &= ~0x01; /* save short packets */ 1262 cbp->stripping &= ~0x01; /* don't truncate rx packets */ 1263 } else { 1264 cbp->promiscuous &= ~1; /* no promiscuous mode */ 1265 cbp->ctrl2 |= 0x01; /* discard short packets */ 1266 cbp->stripping |= 0x01; /* truncate rx packets */ 1267 } 1268 1269 if (prm || save_bf) 1270 cbp->ctrl1 |= 0x80; /* save bad frames */ 1271 else 1272 cbp->ctrl1 &= ~0x80; /* discard bad frames */ 1273 1274 if (sc->sc_flags & FXPF_MWI_ENABLE) 1275 cbp->ctrl0 |= 0x01; /* enable PCI MWI command */ 1276 1277 if(!sc->phy_10Mbps_only) /* interface mode */ 1278 cbp->mediatype |= 0x01; 1279 else 1280 cbp->mediatype &= ~0x01; 1281 1282 if(lrxen) /* long packets */ 1283 cbp->stripping |= 0x08; 1284 else 1285 cbp->stripping &= ~0x08; 1286 1287 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max, dma_dce = 0 ??? 
*/ 1288 cbp->ctrl1 |= 0x08; /* ci_int = 1 */ 1289 cbp->ctrl3 |= 0x08; /* nsai */ 1290 cbp->fifo_limit = 0x08; /* tx and rx fifo limit */ 1291 cbp->fdx_pin |= 0x80; /* Enable full duplex setting by pin */ 1292 #endif 1293 1294 /* 1295 * Start the config command/DMA. 1296 */ 1297 fxp_scb_wait(sc); 1298 FXP_CFG_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1299 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr + 1300 offsetof(struct fxp_ctrl, u.cfg)); 1301 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 1302 /* ...and wait for it to complete. */ 1303 i = FXP_CMD_TMO; 1304 do { 1305 DELAY(1); 1306 FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1307 } while ((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0 && i--); 1308 1309 FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1310 if (!(cbp->cb_status & htole16(FXP_CB_STATUS_C))) { 1311 printf("%s: config command timeout\n", sc->sc_dev.dv_xname); 1312 return; 1313 } 1314 1315 /* 1316 * Now initialize the station address. 1317 */ 1318 cb_ias = &sc->sc_ctrl->u.ias; 1319 cb_ias->cb_status = htole16(0); 1320 cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL); 1321 cb_ias->link_addr = htole32(0xffffffff); 1322 bcopy(sc->sc_arpcom.ac_enaddr, (void *)cb_ias->macaddr, 1323 sizeof(sc->sc_arpcom.ac_enaddr)); 1324 1325 /* 1326 * Start the IAS (Individual Address Setup) command/DMA. 1327 */ 1328 fxp_scb_wait(sc); 1329 FXP_IAS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1330 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr + 1331 offsetof(struct fxp_ctrl, u.ias)); 1332 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 1333 /* ...and wait for it to complete. */ 1334 i = FXP_CMD_TMO; 1335 do { 1336 DELAY(1); 1337 FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1338 } while (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C)) && i--); 1339 1340 FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1341 if (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C))) { 1342 printf("%s: IAS command timeout\n", sc->sc_dev.dv_xname); 1343 return; 1344 } 1345 1346 /* Again, this time really upload the multicast addresses */ 1347 fxp_mc_setup(sc, 1); 1348 1349 /* 1350 * Initialize transmit control block (TxCB) list. 1351 */ 1352 bzero(sc->sc_ctrl->tx_cb, sizeof(struct fxp_cb_tx) * FXP_NTXCB); 1353 txp = sc->sc_ctrl->tx_cb; 1354 for (i = 0; i < FXP_NTXCB; i++) { 1355 txp[i].cb_command = htole16(FXP_CB_COMMAND_NOP); 1356 txp[i].link_addr = htole32(sc->tx_cb_map->dm_segs->ds_addr + 1357 offsetof(struct fxp_ctrl, tx_cb[(i + 1) & FXP_TXCB_MASK])); 1358 txp[i].tbd_array_addr =htole32(sc->tx_cb_map->dm_segs->ds_addr + 1359 offsetof(struct fxp_ctrl, tx_cb[i].tbd[0])); 1360 } 1361 /* 1362 * Set the suspend flag on the first TxCB and start the control 1363 * unit. It will execute the NOP and then suspend. 1364 */ 1365 sc->sc_cbt_prev = sc->sc_cbt_prod = sc->sc_cbt_cons = sc->txs; 1366 sc->sc_cbt_cnt = 1; 1367 sc->sc_ctrl->tx_cb[0].cb_command = htole16(FXP_CB_COMMAND_NOP | 1368 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); 1369 bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map, 0, 1370 sc->tx_cb_map->dm_mapsize, 1371 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1372 1373 fxp_scb_wait(sc); 1374 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr + 1375 offsetof(struct fxp_ctrl, tx_cb[0])); 1376 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 1377 1378 /* 1379 * Initialize receiver buffer area - RFA. 
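 * The RFA list is grown to FXP_NRFABUFS_MAX while the interface is up
 * and shrunk back to FXP_NRFABUFS_MIN otherwise.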
1380 */ 1381 if (ifp->if_flags & IFF_UP) 1382 bufs = FXP_NRFABUFS_MAX; 1383 else 1384 bufs = FXP_NRFABUFS_MIN; 1385 if (sc->rx_bufs > bufs) { 1386 while (sc->rfa_headm != NULL && sc->rx_bufs-- > bufs) { 1387 rxmap = *((bus_dmamap_t *)sc->rfa_headm->m_ext.ext_buf); 1388 bus_dmamap_unload(sc->sc_dmat, rxmap); 1389 FXP_RXMAP_PUT(sc, rxmap); 1390 sc->rfa_headm = m_free(sc->rfa_headm); 1391 } 1392 } else if (sc->rx_bufs < bufs) { 1393 int err, tmp_rx_bufs = sc->rx_bufs; 1394 for (i = sc->rx_bufs; i < bufs; i++) { 1395 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1396 MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) { 1397 printf("%s: unable to create rx dma map %d, " 1398 "error %d\n", sc->sc_dev.dv_xname, i, err); 1399 break; 1400 } 1401 sc->rx_bufs++; 1402 } 1403 for (i = tmp_rx_bufs; i < sc->rx_bufs; i++) 1404 if (fxp_add_rfabuf(sc, NULL) != 0) 1405 break; 1406 } 1407 fxp_scb_wait(sc); 1408 1409 /* 1410 * Set current media. 1411 */ 1412 mii_mediachg(&sc->sc_mii); 1413 1414 ifp->if_flags |= IFF_RUNNING; 1415 ifq_clr_oactive(&ifp->if_snd); 1416 1417 /* 1418 * Request a software generated interrupt that will be used to 1419 * (re)start the RU processing. If we direct the chip to start 1420 * receiving from the start of queue now, instead of letting the 1421 * interrupt handler first process all received packets, we run 1422 * the risk of having it overwrite mbuf clusters while they are 1423 * being processed or after they have been returned to the pool. 1424 */ 1425 CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND, 1426 CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) | 1427 FXP_SCB_INTRCNTL_REQUEST_SWI); 1428 1429 /* 1430 * Start stats updater. 1431 */ 1432 timeout_add_sec(&sc->stats_update_to, 1); 1433 } 1434 1435 /* 1436 * Change media according to request. 1437 */ 1438 int 1439 fxp_mediachange(struct ifnet *ifp) 1440 { 1441 struct fxp_softc *sc = ifp->if_softc; 1442 struct mii_data *mii = &sc->sc_mii; 1443 1444 if (mii->mii_instance) { 1445 struct mii_softc *miisc; 1446 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1447 mii_phy_reset(miisc); 1448 } 1449 mii_mediachg(&sc->sc_mii); 1450 return (0); 1451 } 1452 1453 /* 1454 * Notify the world which media we're using. 1455 */ 1456 void 1457 fxp_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1458 { 1459 struct fxp_softc *sc = ifp->if_softc; 1460 1461 mii_pollstat(&sc->sc_mii); 1462 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1463 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1464 } 1465 1466 /* 1467 * Add a buffer to the end of the RFA buffer list. 1468 * Return 0 if successful, 1 for failure. A failure results in 1469 * adding the 'oldm' (if non-NULL) on to the end of the list - 1470 * tossing out its old contents and recycling it. 1471 * The RFA struct is stuck at the beginning of mbuf cluster and the 1472 * data pointer is fixed up to point just past it. 
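 * A pointer to the receive DMA map is stored at the very start of the
 * cluster, in the space accounted for by RFA_ALIGNMENT_FUDGE.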
1473 */ 1474 int 1475 fxp_add_rfabuf(struct fxp_softc *sc, struct mbuf *oldm) 1476 { 1477 u_int32_t v; 1478 struct mbuf *m; 1479 u_int8_t *rfap; 1480 bus_dmamap_t rxmap = NULL; 1481 1482 MGETHDR(m, M_DONTWAIT, MT_DATA); 1483 if (m != NULL) { 1484 MCLGET(m, M_DONTWAIT); 1485 if ((m->m_flags & M_EXT) == 0) { 1486 m_freem(m); 1487 if (oldm == NULL) 1488 return 1; 1489 m = oldm; 1490 m->m_data = m->m_ext.ext_buf; 1491 } 1492 if (oldm == NULL) { 1493 rxmap = FXP_RXMAP_GET(sc); 1494 *((bus_dmamap_t *)m->m_ext.ext_buf) = rxmap; 1495 bus_dmamap_load(sc->sc_dmat, rxmap, 1496 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1497 BUS_DMA_NOWAIT); 1498 } else if (oldm == m) 1499 rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf); 1500 else { 1501 rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf); 1502 bus_dmamap_unload(sc->sc_dmat, rxmap); 1503 bus_dmamap_load(sc->sc_dmat, rxmap, 1504 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1505 BUS_DMA_NOWAIT); 1506 *mtod(m, bus_dmamap_t *) = rxmap; 1507 } 1508 } else { 1509 if (oldm == NULL) 1510 return 1; 1511 m = oldm; 1512 m->m_data = m->m_ext.ext_buf; 1513 rxmap = *mtod(m, bus_dmamap_t *); 1514 } 1515 1516 /* 1517 * Move the data pointer up so that the incoming data packet 1518 * will be 32-bit aligned. 1519 */ 1520 m->m_data += RFA_ALIGNMENT_FUDGE; 1521 1522 /* 1523 * Get a pointer to the base of the mbuf cluster and move 1524 * data start past it. 1525 */ 1526 rfap = m->m_data; 1527 m->m_data += sizeof(struct fxp_rfa); 1528 *(u_int16_t *)(rfap + offsetof(struct fxp_rfa, size)) = 1529 htole16(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE); 1530 1531 /* 1532 * Initialize the rest of the RFA. Note that since the RFA 1533 * is misaligned, we cannot store values directly. Instead, 1534 * we use an optimized, inline copy. 1535 */ 1536 *(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_status)) = 0; 1537 *(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) = 1538 htole16(FXP_RFA_CONTROL_EL); 1539 *(u_int16_t *)(rfap + offsetof(struct fxp_rfa, actual_size)) = 0; 1540 1541 v = -1; 1542 fxp_lwcopy(&v, 1543 (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr))); 1544 fxp_lwcopy(&v, 1545 (u_int32_t *)(rfap + offsetof(struct fxp_rfa, rbd_addr))); 1546 1547 bus_dmamap_sync(sc->sc_dmat, rxmap, 0, MCLBYTES, 1548 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1549 1550 /* 1551 * If there are other buffers already on the list, attach this 1552 * one to the end by fixing up the tail to point to this one. 
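 * The old tail's link_addr is pointed at the new RFA and its EL bit is
 * cleared so the receiver does not stop there.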
1553 */ 1554 if (sc->rfa_headm != NULL) { 1555 sc->rfa_tailm->m_next = m; 1556 v = htole32(rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE); 1557 rfap = sc->rfa_tailm->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE; 1558 fxp_lwcopy(&v, 1559 (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr))); 1560 *(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) &= 1561 htole16((u_int16_t)~FXP_RFA_CONTROL_EL); 1562 /* XXX we only need to sync the control struct */ 1563 bus_dmamap_sync(sc->sc_dmat, 1564 *((bus_dmamap_t *)sc->rfa_tailm->m_ext.ext_buf), 0, 1565 MCLBYTES, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1566 } else 1567 sc->rfa_headm = m; 1568 1569 sc->rfa_tailm = m; 1570 1571 return (m == oldm); 1572 } 1573 1574 int 1575 fxp_mdi_read(struct device *self, int phy, int reg) 1576 { 1577 struct fxp_softc *sc = (struct fxp_softc *)self; 1578 int count = FXP_CMD_TMO; 1579 int value; 1580 1581 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1582 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); 1583 1584 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 1585 && count--) 1586 DELAY(10); 1587 1588 if (count <= 0) 1589 printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname); 1590 1591 return (value & 0xffff); 1592 } 1593 1594 void 1595 fxp_statchg(struct device *self) 1596 { 1597 /* Nothing to do. */ 1598 } 1599 1600 void 1601 fxp_mdi_write(struct device *self, int phy, int reg, int value) 1602 { 1603 struct fxp_softc *sc = (struct fxp_softc *)self; 1604 int count = FXP_CMD_TMO; 1605 1606 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1607 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | 1608 (value & 0xffff)); 1609 1610 while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && 1611 count--) 1612 DELAY(10); 1613 1614 if (count <= 0) 1615 printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname); 1616 } 1617 1618 int 1619 fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1620 { 1621 struct fxp_softc *sc = ifp->if_softc; 1622 struct ifreq *ifr = (struct ifreq *)data; 1623 int s, error = 0; 1624 1625 s = splnet(); 1626 1627 switch (command) { 1628 case SIOCSIFADDR: 1629 ifp->if_flags |= IFF_UP; 1630 if (!(ifp->if_flags & IFF_RUNNING)) 1631 fxp_init(sc); 1632 break; 1633 1634 case SIOCSIFFLAGS: 1635 if (ifp->if_flags & IFF_UP) { 1636 if (ifp->if_flags & IFF_RUNNING) 1637 error = ENETRESET; 1638 else 1639 fxp_init(sc); 1640 } else { 1641 if (ifp->if_flags & IFF_RUNNING) 1642 fxp_stop(sc, 1, 0); 1643 } 1644 break; 1645 1646 case SIOCSIFMEDIA: 1647 case SIOCGIFMEDIA: 1648 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1649 break; 1650 1651 default: 1652 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data); 1653 } 1654 1655 if (error == ENETRESET) { 1656 if (ifp->if_flags & IFF_RUNNING) 1657 fxp_init(sc); 1658 error = 0; 1659 } 1660 1661 splx(s); 1662 return (error); 1663 } 1664 1665 /* 1666 * Program the multicast filter. 1667 * 1668 * We have an artificial restriction that the multicast setup command 1669 * must be the first command in the chain, so we take steps to ensure 1670 * this. By requiring this, it allows us to keep up the performance of 1671 * the pre-initialized command ring (esp. link pointers) by not actually 1672 * inserting the mcsetup command in the ring - i.e. its link pointer 1673 * points to the TxCB ring, but the mcsetup descriptor itself is not part 1674 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it 1675 * lead into the regular TxCB ring when it completes. 1676 * 1677 * This function must be called at splnet. 
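 * When called with doit == 0 only the address list and IFF_ALLMULTI are
 * recomputed; the MCAS command itself is issued on the second call from
 * fxp_init().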
1678 */ 1679 void 1680 fxp_mc_setup(struct fxp_softc *sc, int doit) 1681 { 1682 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1683 struct arpcom *ac = &sc->sc_arpcom; 1684 struct fxp_cb_mcs *mcsp = &sc->sc_ctrl->u.mcs; 1685 struct ether_multistep step; 1686 struct ether_multi *enm; 1687 int i, nmcasts = 0; 1688 1689 splassert(IPL_NET); 1690 1691 ifp->if_flags &= ~IFF_ALLMULTI; 1692 1693 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 || 1694 ac->ac_multicnt >= MAXMCADDR) { 1695 ifp->if_flags |= IFF_ALLMULTI; 1696 } else { 1697 ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm); 1698 while (enm != NULL) { 1699 bcopy(enm->enm_addrlo, 1700 (void *)&mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN); 1701 1702 nmcasts++; 1703 1704 ETHER_NEXT_MULTI(step, enm); 1705 } 1706 } 1707 1708 if (doit == 0) 1709 return; 1710 1711 /* 1712 * Initialize multicast setup descriptor. 1713 */ 1714 mcsp->cb_status = htole16(0); 1715 mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL); 1716 mcsp->link_addr = htole32(-1); 1717 mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN); 1718 1719 /* 1720 * Wait until command unit is not active. This should never 1721 * be the case when nothing is queued, but make sure anyway. 1722 */ 1723 for (i = FXP_CMD_TMO; (CSR_READ_2(sc, FXP_CSR_SCB_STATUS) & 1724 FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE && i--; DELAY(1)); 1725 1726 if ((CSR_READ_2(sc, FXP_CSR_SCB_STATUS) & 1727 FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE) { 1728 printf("%s: timeout waiting for CU ready\n", 1729 sc->sc_dev.dv_xname); 1730 return; 1731 } 1732 1733 /* 1734 * Start the multicast setup command. 1735 */ 1736 fxp_scb_wait(sc); 1737 FXP_MCS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1738 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr + 1739 offsetof(struct fxp_ctrl, u.mcs)); 1740 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 1741 1742 i = FXP_CMD_TMO; 1743 do { 1744 DELAY(1); 1745 FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1746 } while (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C)) && i--); 1747 1748 FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1749 if (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C))) { 1750 printf("%s: multicast command timeout\n", sc->sc_dev.dv_xname); 1751 return; 1752 } 1753 1754 } 1755 1756 #ifndef SMALL_KERNEL 1757 #include <dev/microcode/fxp/rcvbundl.h> 1758 struct ucode { 1759 u_int16_t revision; 1760 u_int16_t int_delay_offset; 1761 u_int16_t bundle_max_offset; 1762 u_int16_t min_size_mask_offset; 1763 const char *uname; 1764 } const ucode_table[] = { 1765 { FXP_REV_82558_A4, D101_CPUSAVER_DWORD, 1766 0, 0, 1767 "fxp-d101a" }, 1768 1769 { FXP_REV_82558_B0, D101_CPUSAVER_DWORD, 1770 0, 0, 1771 "fxp-d101b0" }, 1772 1773 { FXP_REV_82559_A0, D101M_CPUSAVER_DWORD, 1774 D101M_CPUSAVER_BUNDLE_MAX_DWORD, D101M_CPUSAVER_MIN_SIZE_DWORD, 1775 "fxp-d101ma" }, 1776 1777 { FXP_REV_82559S_A, D101S_CPUSAVER_DWORD, 1778 D101S_CPUSAVER_BUNDLE_MAX_DWORD, D101S_CPUSAVER_MIN_SIZE_DWORD, 1779 "fxp-d101s" }, 1780 1781 { FXP_REV_82550, D102_B_CPUSAVER_DWORD, 1782 D102_B_CPUSAVER_BUNDLE_MAX_DWORD, D102_B_CPUSAVER_MIN_SIZE_DWORD, 1783 "fxp-d102" }, 1784 1785 { FXP_REV_82550_C, D102_C_CPUSAVER_DWORD, 1786 D102_C_CPUSAVER_BUNDLE_MAX_DWORD, D102_C_CPUSAVER_MIN_SIZE_DWORD, 1787 "fxp-d102c" }, 1788 1789 { FXP_REV_82551_F, D102_E_CPUSAVER_DWORD, 1790 D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD, 1791 "fxp-d102e" }, 1792 1793 { FXP_REV_82551_10, D102_E_CPUSAVER_DWORD, 1794 D102_E_CPUSAVER_BUNDLE_MAX_DWORD, 
D102_E_CPUSAVER_MIN_SIZE_DWORD, 1795 "fxp-d102e" }, 1796 1797 { 0, 0, 1798 0, 0, 1799 NULL } 1800 }; 1801 1802 void 1803 fxp_load_ucode(struct fxp_softc *sc) 1804 { 1805 const struct ucode *uc; 1806 struct fxp_cb_ucode *cbp = &sc->sc_ctrl->u.code; 1807 int i, error; 1808 1809 if (sc->sc_flags & FXPF_NOUCODE) 1810 return; 1811 1812 for (uc = ucode_table; uc->revision != 0; uc++) 1813 if (sc->sc_revision == uc->revision) 1814 break; 1815 if (uc->revision == 0) { 1816 sc->sc_flags |= FXPF_NOUCODE; 1817 return; /* no ucode for this chip is found */ 1818 } 1819 1820 if (sc->sc_ucodebuf) 1821 goto reloadit; 1822 1823 if (sc->sc_revision == FXP_REV_82550_C) { 1824 u_int16_t data; 1825 1826 /* 1827 * 82550C without the server extensions 1828 * locks up with the microcode patch. 1829 */ 1830 fxp_read_eeprom(sc, &data, FXP_EEPROM_REG_COMPAT, 1); 1831 if ((data & FXP_EEPROM_REG_COMPAT_SRV) == 0) { 1832 sc->sc_flags |= FXPF_NOUCODE; 1833 return; 1834 } 1835 } 1836 1837 error = loadfirmware(uc->uname, (u_char **)&sc->sc_ucodebuf, 1838 &sc->sc_ucodelen); 1839 if (error) { 1840 printf("%s: error %d, could not read firmware %s\n", 1841 sc->sc_dev.dv_xname, error, uc->uname); 1842 return; 1843 } 1844 1845 reloadit: 1846 if (sc->sc_flags & FXPF_UCODELOADED) 1847 return; 1848 1849 cbp->cb_status = 0; 1850 cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE|FXP_CB_COMMAND_EL); 1851 cbp->link_addr = 0xffffffff; /* (no) next command */ 1852 for (i = 0; i < (sc->sc_ucodelen / sizeof(u_int32_t)); i++) 1853 cbp->ucode[i] = sc->sc_ucodebuf[i]; 1854 1855 if (uc->int_delay_offset) 1856 *((u_int16_t *)&cbp->ucode[uc->int_delay_offset]) = 1857 htole16(sc->sc_int_delay + sc->sc_int_delay / 2); 1858 1859 if (uc->bundle_max_offset) 1860 *((u_int16_t *)&cbp->ucode[uc->bundle_max_offset]) = 1861 htole16(sc->sc_bundle_max); 1862 1863 if (uc->min_size_mask_offset) 1864 *((u_int16_t *)&cbp->ucode[uc->min_size_mask_offset]) = 1865 htole16(sc->sc_min_size_mask); 1866 1867 FXP_UCODE_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1868 1869 /* 1870 * Download the ucode to the chip. 1871 */ 1872 fxp_scb_wait(sc); 1873 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr 1874 + offsetof(struct fxp_ctrl, u.code)); 1875 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 1876 1877 /* ...and wait for it to complete. */ 1878 i = FXP_CMD_TMO; 1879 do { 1880 DELAY(2); 1881 FXP_UCODE_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1882 } while (((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0) && --i); 1883 if (i == 0) { 1884 printf("%s: timeout loading microcode\n", sc->sc_dev.dv_xname); 1885 return; 1886 } 1887 sc->sc_flags |= FXPF_UCODELOADED; 1888 1889 #ifdef DEBUG 1890 printf("%s: microcode loaded, int_delay: %d usec", 1891 sc->sc_dev.dv_xname, sc->sc_int_delay); 1892 1893 if (uc->bundle_max_offset) 1894 printf(", bundle_max %d\n", sc->sc_bundle_max); 1895 else 1896 printf("\n"); 1897 #endif 1898 } 1899 #endif /* SMALL_KERNEL */ 1900