1 /* $OpenBSD: fxp.c,v 1.108 2011/04/07 15:30:16 miod Exp $ */ 2 /* $NetBSD: if_fxp.c,v 1.2 1997/06/05 02:01:55 thorpej Exp $ */ 3 4 /* 5 * Copyright (c) 1995, David Greenman 6 * All rights reserved. 7 * 8 * Modifications to support NetBSD: 9 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice unmodified, this list of conditions, and the following 16 * disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 *	Id: if_fxp.c,v 1.55 1998/08/04 08:53:12 dg Exp
 */

/*
 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/timeout.h>
#include <sys/workq.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <netinet/if_ether.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/fxpreg.h>
#include <dev/ic/fxpvar.h>

/*
 * NOTE! On the Alpha, we have an alignment constraint. The
 * card DMAs the packet immediately following the RFA. However,
 * the first thing in the packet is a 14-byte Ethernet header.
 * This means that the packet is misaligned. To compensate,
 * we actually offset the RFA 2 bytes into the cluster. This
 * aligns the packet after the Ethernet header at a 32-bit
 * boundary. HOWEVER! This means that the RFA is misaligned!
 *
 * The extra sizeof(bus_dmamap_t *) accounts for the dmamap pointer
 * that this driver stashes at the very start of each receive
 * cluster (see fxp_intr()/fxp_attach(), which read it back with
 * *(bus_dmamap_t *)m->m_ext.ext_buf).
 */
#define RFA_ALIGNMENT_FUDGE	(2 + sizeof(bus_dmamap_t *))

/*
 * Inline function to copy a 16-bit aligned 32-bit quantity.
 * The RFA may only be 16-bit aligned (see above), so a direct
 * 32-bit load/store could fault on strict-alignment machines;
 * copy it as two 16-bit halves instead.
 */
static __inline void fxp_lwcopy(volatile u_int32_t *,
	volatile u_int32_t *);

static __inline void
fxp_lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst)
{
	volatile u_int16_t *a = (u_int16_t *)src;
	volatile u_int16_t *b = (u_int16_t *)dst;

	b[0] = a[0];
	b[1] = a[1];
}

/*
 * Template for default configuration parameters.
 * See struct fxp_cb_config for the bit definitions.
 * Note, cb_command is filled in later.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command */
	0xff, 0xff, 0xff, 0xff,	/* link_addr */
	0x16,	/*  0 Byte count. */
	0x08,	/*  1 Fifo limit */
	0x00,	/*  2 Adaptive ifs */
	0x00,	/*  3 ctrl0 */
	0x00,	/*  4 rx_dma_bytecount */
	0x80,	/*  5 tx_dma_bytecount */
	0xb2,	/*  6 ctrl 1*/
	0x03,	/*  7 ctrl 2*/
	0x01,	/*  8 mediatype */
	0x00,	/*  9 void2 */
	0x26,	/* 10 ctrl3 */
	0x00,	/* 11 linear priority */
	0x60,	/* 12 interfrm_spacing */
	0x00,	/* 13 void31 */
	0xf2,	/* 14 void32 */
	0x48,	/* 15 promiscuous */
	0x00,	/* 16 void41 */
	0x40,	/* 17 void42 */
	0xf3,	/* 18 stripping */
	0x00,	/* 19 fdx_pin */
	0x3f,	/* 20 multi_ia */
	0x05	/* 21 mc_all */
};

/* Forward declarations for this file's internal functions. */
void fxp_eeprom_shiftin(struct fxp_softc *, int, int);
void fxp_eeprom_putword(struct fxp_softc *, int, u_int16_t);
void fxp_write_eeprom(struct fxp_softc *, u_short *, int, int);
int fxp_mediachange(struct ifnet *);
void fxp_mediastatus(struct ifnet *, struct ifmediareq *);
void fxp_scb_wait(struct fxp_softc *);
void fxp_start(struct ifnet *);
int fxp_ioctl(struct ifnet *, u_long, caddr_t);
void fxp_load_ucode(struct fxp_softc *);
void fxp_watchdog(struct ifnet *);
int fxp_add_rfabuf(struct fxp_softc *, struct mbuf *);
int fxp_mdi_read(struct device *, int, int);
void fxp_mdi_write(struct device *, int, int, int);
void fxp_autosize_eeprom(struct fxp_softc *);
void fxp_statchg(struct device *);
void fxp_read_eeprom(struct fxp_softc *, u_int16_t *,
			    int, int);
void fxp_stats_update(void *);
void fxp_mc_setup(struct fxp_softc *, int);
void fxp_scb_cmd(struct fxp_softc *, u_int16_t);

/*
 * Set initial transmit threshold at 64 (512 bytes).
This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;

/*
 * Interrupts coalescing code params
 */
int fxp_int_delay = FXP_INT_DELAY;
int fxp_bundle_max = FXP_BUNDLE_MAX;
int fxp_min_size_mask = FXP_MIN_SIZE_MASK;

/*
 * TxCB list index mask. This is used to do list wrap-around.
 */
#define FXP_TXCB_MASK	(FXP_NTXCB - 1)

/*
 * Maximum number of seconds that the receiver can be idle before we
 * assume it's dead and attempt to reset it by reprogramming the
 * multicast filter. This is part of a work-around for a bug in the
 * NIC. See fxp_stats_update().
 */
#define FXP_MAX_RX_IDLE	15

/*
 * Wait for the previous command to be accepted (but not necessarily
 * completed).  The low byte of the SCB command register reads back
 * non-zero until the chip has accepted the pending command.
 */
void
fxp_scb_wait(struct fxp_softc *sc)
{
	int i = FXP_CMD_TMO;

	while ((CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff) && --i)
		DELAY(2);
	if (i == 0)
		printf("%s: warning: SCB timed out\n", sc->sc_dev.dv_xname);
}

/*
 * Clock `length' bits of `data' into the serial EEPROM's data-in
 * line, MSB first, toggling the EESK clock once per bit.
 */
void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
{
	u_int16_t reg;
	int x;

	/*
	 * Shift in data.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		/* Present the data bit, then pulse the clock. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
}

/*
 * Write one 16-bit word (`data') to the serial EEPROM at word
 * `offset'.  The part is write-enabled first and write-disabled
 * again afterwards; in between we shift in the write opcode,
 * the address, and the data, then poll EEDO for completion.
 */
void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.
	 * (EEDO goes high when the internal write cycle completes;
	 * bounded poll of at most ~50ms.)
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}

/*
 * Write `words' consecutive 16-bit words from `data' into the serial
 * EEPROM, starting at word `offset'.
 */
void
fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		fxp_eeprom_putword(sc, offset + i, data[i]);
}

/*************************************************************
 * Operating system-specific autoconfiguration glue
 *************************************************************/

struct cfdriver fxp_cd = {
	NULL, "fxp", DV_IFNET
};

/*
 * Power-management activation hook: quiesce/suspend/resume the
 * device, propagating the request to any children.
 */
int
fxp_activate(struct device *self, int act)
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_QUIESCE:
		rv = config_activate_children(self, act);
		break;
	case DVACT_SUSPEND:
		/* Stop the chip (and drain buffers) before suspending. */
		if (ifp->if_flags & IFF_RUNNING)
			fxp_stop(sc, 1, 0);
		rv =
config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		rv = config_activate_children(self, act);
		/*
		 * Re-init must not run from this context; defer it to
		 * a workq task (fxp_resume).
		 */
		if (ifp->if_flags & IFF_UP)
			workq_queue_task(NULL, &sc->sc_resume_wqt, 0,
			    fxp_resume, sc, NULL);
		break;
	}
	return (rv);
}

/*
 * Deferred (workq) resume handler: re-initialize the chip.
 * arg2 is unused.
 */
void
fxp_resume(void *arg1, void *arg2)
{
	struct fxp_softc *sc = arg1;

	fxp_init(sc);
}

/*************************************************************
 * End of operating system-specific autoconfiguration glue
 *************************************************************/

/*
 * Do generic parts of attach: allocate DMA resources, read the
 * EEPROM (MAC address, PHY info), set up the transmit/receive rings,
 * probe the MII and attach the network interface.
 * Returns 0 on success, ENOMEM on any allocation failure.
 */
int
fxp_attach(struct fxp_softc *sc, const char *intrstr)
{
	struct ifnet *ifp;
	struct mbuf *m;
	bus_dmamap_t rxmap;
	u_int16_t data;
	u_int8_t enaddr[6];
	int i, err;

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(10);

	/*
	 * Allocate, map and load DMA-safe memory for the shared
	 * control structures (TxCBs, config/IAS blocks, stats buffer).
	 * Each failure path unwinds the steps that already succeeded.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct fxp_ctrl),
	    PAGE_SIZE, 0, &sc->sc_cb_seg, 1, &sc->sc_cb_nseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO))
		goto fail;
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg,
	    sizeof(struct fxp_ctrl), (caddr_t *)&sc->sc_ctrl,
	    BUS_DMA_NOWAIT)) {
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		goto fail;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct fxp_ctrl),
	    1, sizeof(struct fxp_ctrl), 0, BUS_DMA_NOWAIT,
	    &sc->tx_cb_map)) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
		    sizeof(struct fxp_ctrl));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->tx_cb_map, (caddr_t)sc->sc_ctrl,
	    sizeof(struct fxp_ctrl), NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
		    sizeof(struct fxp_ctrl));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		goto fail;
	}

	/*
	 * Set up the software state for the circular TxCB list;
	 * tx_next wraps via FXP_TXCB_MASK.
	 */
	for (i = 0; i < FXP_NTXCB; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    FXP_NTXSEG, MCLBYTES, 0, 0, &sc->txs[i].tx_map)) != 0) {
			printf("%s: unable to create tx dma map %d, error %d\n",
			    sc->sc_dev.dv_xname, i, err);
			goto fail;
		}
		sc->txs[i].tx_mbuf = NULL;
		sc->txs[i].tx_cb = sc->sc_ctrl->tx_cb + i;
		sc->txs[i].tx_off = offsetof(struct fxp_ctrl, tx_cb[i]);
		sc->txs[i].tx_next = &sc->txs[(i + 1) & FXP_TXCB_MASK];
	}

	/*
	 * Pre-allocate some receive buffers.
	 */
	sc->sc_rxfree = 0;
	for (i = 0; i < FXP_NRFABUFS_MIN; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
			printf("%s: unable to create rx dma map %d, error %d\n",
			    sc->sc_dev.dv_xname, i, err);
			goto fail;
		}
		sc->rx_bufs++;
	}
	for (i = 0; i < FXP_NRFABUFS_MIN; i++)
		if (fxp_add_rfabuf(sc, NULL) != 0)
			goto fail;

	/*
	 * Find out how large of an SEEPROM we have.
	 */
	fxp_autosize_eeprom(sc);

	/*
	 * Get info about the primary PHY (EEPROM word 6).
	 */
	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
	sc->phy_primary_addr = data & 0xff;
	sc->phy_primary_device = (data >> 8) & 0x3f;
	sc->phy_10Mbps_only = data >> 15;

	/*
	 * Only 82558 and newer cards can do this (interrupt
	 * coalescing parameters).
	 */
	if (sc->sc_revision >= FXP_REV_82558_A4) {
		sc->sc_int_delay = fxp_int_delay;
		sc->sc_bundle_max = fxp_bundle_max;
		sc->sc_min_size_mask = fxp_min_size_mask;
	}
	/*
	 * Read MAC address (EEPROM words 0-2).
	 */
	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);

	ifp = &sc->sc_arpcom.ac_if;
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Some parts need dynamic standby mode cleared in the EEPROM;
	 * if the bit (word 10, bit 1) is set, clear it and rewrite the
	 * checksum word (last word; words must sum to 0xBABA).
	 */
	if (sc->sc_flags & FXPF_DISABLE_STANDBY) {
		fxp_read_eeprom(sc, &data, 10, 1);
		if (data & 0x02) {			/* STB enable */
			u_int16_t cksum;

			printf("%s: Disabling dynamic standby mode in EEPROM",
			    sc->sc_dev.dv_xname);
			data &= ~0x02;
			fxp_write_eeprom(sc, &data, 10, 1);
			printf(", New ID 0x%x", data);
			cksum = 0;
			for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
				fxp_read_eeprom(sc, &data, i, 1);
				cksum += data;
			}
			i = (1 << sc->eeprom_size) - 1;
			cksum = 0xBABA - cksum;
			fxp_read_eeprom(sc, &data, i, 1);
			fxp_write_eeprom(sc, &cksum, i, 1);
			printf(", cksum @ 0x%x: 0x%x -> 0x%x\n",
			    i, data, cksum);
		}
	}

	/* Receiver lock-up workaround detection. */
	fxp_read_eeprom(sc, &data, 3, 1);
	if ((data & 0x03) != 0x03)
		sc->sc_flags |= FXPF_RECV_WORKAROUND;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = fxp_mdi_read;
	sc->sc_mii.mii_writereg = fxp_mdi_write;
	sc->sc_mii.mii_statchg = fxp_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mediachange,
	    fxp_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	/* If no phy found, just use auto mode */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		printf("%s: no phy found, using manual mode\n",
		    sc->sc_dev.dv_xname);
	}

	/* Prefer manual, then autoselect, then plain 10baseT. */
	if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0))
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	else if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0))
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/*
	 * Initialize timeout for statistics update.
	 */
	timeout_set(&sc->stats_update_to, fxp_stats_update, sc);

	return (0);

fail:
	/* Unwind whatever was allocated above, then fail the attach. */
	printf("%s: Failed to malloc memory\n", sc->sc_dev.dv_xname);
	if (sc->tx_cb_map != NULL) {
		bus_dmamap_unload(sc->sc_dmat, sc->tx_cb_map);
		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
		    sizeof(struct fxp_cb_tx) * FXP_NTXCB);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
	}
	m = sc->rfa_headm;
	while (m != NULL) {
		/* The dmamap pointer is stashed at the cluster's start. */
		rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
		bus_dmamap_unload(sc->sc_dmat, rxmap);
		FXP_RXMAP_PUT(sc, rxmap);
		m = m_free(m);
	}
	return (ENOMEM);
}

/*
 * From NetBSD:
 *
 * Figure out EEPROM size.
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero, by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-state,
 * before this, which should translate to a logical one.
 *
 * Other ways to do this would be to try to read a register with known
 * contents with a varying number of address bits, but no such
 * register seem to be available. The high bits of register 10 are 01
 * on the 558 and 559, but apparently not on the 557.
 *
 * The Linux driver computes a checksum on the EEPROM data, but the
 * value of this checksum is not very well documented.
 */
void
fxp_autosize_eeprom(struct fxp_softc *sc)
{
	u_int16_t reg;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	for (x = 3; x > 0; x--) {
		if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		} else {
			reg = FXP_EEPROM_EECS;
		}
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    reg | FXP_EEPROM_EESK);
		DELAY(4);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(4);
	}
	/*
	 * Shift in address.
	 * Wait for the dummy zero following a correct address shift.
	 */
	for (x = 1; x <= 8; x++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    FXP_EEPROM_EECS | FXP_EEPROM_EESK);
		DELAY(4);
		if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0)
			break;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		DELAY(4);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(4);
	/* Number of address bits needed == the part's address width. */
	sc->eeprom_size = x;
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.  Words are stored little-endian in the
 * EEPROM and converted to host order here (letoh16).
 */
void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset,
    int words)
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		/*
		 * Shift in address (msb first, sc->eeprom_size bits).
		 */
		for (x = sc->eeprom_size; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data (16 bits, msb first).
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		data[i] = letoh16(data[i]);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(4);
	}
}

/*
 * Start packet transmission on the interface.
 * Called with the send queue non-empty; drains as many packets as
 * fit into the TxCB ring (leaving room for the NOP terminator).
 */
void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_txsw *txs = sc->sc_cbt_prod;
	struct fxp_cb_tx *txc;
	struct mbuf *m0, *m = NULL;
	int cnt = sc->sc_cbt_cnt, seg;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	while (1) {
		/* Ring full (room kept for the terminating NOP). */
		if (cnt >= (FXP_NTXCB - 2)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = txs->tx_next;

		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * If the chain is too fragmented to DMA-load directly,
		 * compact it into a single fresh mbuf (cluster if the
		 * packet doesn't fit in an mbuf header) and retry.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
		    m0, BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
			    m, BUS_DMA_NOWAIT) != 0) {
				m_freem(m);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied the packet; free the original chain. */
			m_freem(m0);
			m0 = m;
			m = NULL;
		}

		txs->tx_mbuf = m0;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif

		FXP_MBUF_SYNC(sc, txs->tx_map, BUS_DMASYNC_PREWRITE);

		/* Fill in the transmit control block for this packet. */
		txc = txs->tx_cb;
		txc->tbd_number = txs->tx_map->dm_nsegs;
		txc->cb_status = 0;
		txc->cb_command = htole16(FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF);
txc->tx_threshold = tx_threshold;
		/* One TBD per DMA segment of the loaded mbuf. */
		for (seg = 0; seg < txs->tx_map->dm_nsegs; seg++) {
			txc->tbd[seg].tb_addr =
			    htole32(txs->tx_map->dm_segs[seg].ds_addr);
			txc->tbd[seg].tb_size =
			    htole32(txs->tx_map->dm_segs[seg].ds_len);
		}
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		++cnt;
		sc->sc_cbt_prod = txs;
	}

	if (cnt != sc->sc_cbt_cnt) {
		/* We enqueued at least one. */
		ifp->if_timer = 5;

		/*
		 * Append a NOP descriptor with the Suspend and
		 * Interrupt bits set as the new ring terminator, then
		 * clear those bits on the previous terminator and
		 * resume the command unit.  Ordering of the syncs
		 * matters: the new terminator must be visible to the
		 * chip before the old one stops suspending it.
		 */
		txs = sc->sc_cbt_prod;
		txs = txs->tx_next;
		sc->sc_cbt_prod = txs;
		txs->tx_cb->cb_command =
		    htole16(FXP_CB_COMMAND_I | FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		sc->sc_cbt_prev->tx_cb->cb_command &=
		    htole16(~(FXP_CB_COMMAND_S | FXP_CB_COMMAND_I));
		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_cbt_prev = txs;

		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);

		/* +1 accounts for the NOP terminator descriptor. */
		sc->sc_cbt_cnt = cnt + 1;
	}
}

/*
 * Process interface interrupts.
 * Returns non-zero if the interrupt was for this device.
 */
int
fxp_intr(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int16_t statack;
	bus_dmamap_t rxmap;
	int claimed = 0;
	int rnr = 0;

	/*
	 * If the interface isn't running, don't try to
	 * service the interrupt.. just ack it and bail.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS);
		if (statack) {
			claimed = 1;
			CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
			    statack & FXP_SCB_STATACK_MASK);
		}
		return claimed;
	}

	/* Loop until no acknowledgeable status bits remain. */
	while ((statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS)) &
	    FXP_SCB_STATACK_MASK) {
		claimed = 1;
		rnr = (statack & (FXP_SCB_STATACK_RNR |
		    FXP_SCB_STATACK_SWI)) ? 1 : 0;
		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
		    statack & FXP_SCB_STATACK_MASK);

		/*
		 * Free any finished transmit mbuf chains.
		 */
		if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) {
			int txcnt = sc->sc_cbt_cnt;
			struct fxp_txsw *txs = sc->sc_cbt_cons;

			FXP_TXCB_SYNC(sc, txs,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			/*
			 * Reap descriptors the chip has completed (C
			 * bit set) and the NOP terminators we planted.
			 */
			while ((txcnt > 0) &&
			   ((txs->tx_cb->cb_status & htole16(FXP_CB_STATUS_C)) ||
			   (txs->tx_cb->cb_command & htole16(FXP_CB_COMMAND_NOP)))) {
				if (txs->tx_mbuf != NULL) {
					FXP_MBUF_SYNC(sc, txs->tx_map,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat,
					    txs->tx_map);
					m_freem(txs->tx_mbuf);
					txs->tx_mbuf = NULL;
				}
				--txcnt;
				txs = txs->tx_next;
				FXP_TXCB_SYNC(sc, txs,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			}
			sc->sc_cbt_cnt = txcnt;
			/* Did we transmit any packets? */
			if (sc->sc_cbt_cons != txs)
				ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_timer = sc->sc_cbt_cnt ? 5 : 0;
			sc->sc_cbt_cons = txs;

			if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
				/*
				 * Try to start more packets transmitting.
				 */
				fxp_start(ifp);
			}
		}
		/*
		 * Process receiver interrupts. If a Receive Unit
		 * not ready (RNR) condition exists, get whatever
		 * packets we can and re-start the receiver.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR |
			       FXP_SCB_STATACK_SWI)) {
			struct mbuf *m;
			u_int8_t *rfap;
rcvloop:
			m = sc->rfa_headm;
			/* RFA lives RFA_ALIGNMENT_FUDGE into the cluster. */
			rfap = m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
			rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
			bus_dmamap_sync(sc->sc_dmat, rxmap,
			    0, MCLBYTES, BUS_DMASYNC_POSTREAD |
			    BUS_DMASYNC_POSTWRITE);

			/* Anything completed at the head of the chain? */
			if (*(u_int16_t *)(rfap +
			    offsetof(struct fxp_rfa, rfa_status)) &
			    htole16(FXP_RFA_STATUS_C)) {
				if (*(u_int16_t *)(rfap +
				    offsetof(struct fxp_rfa, rfa_status)) &
				    htole16(FXP_RFA_STATUS_RNR))
					rnr = 1;

				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_headm = m->m_next;
				m->m_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					u_int16_t total_len;

					total_len = htole16(*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    actual_size))) &
					    (MCLBYTES - 1);
					/* Drop runts. */
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					/* Drop frames with CRC errors. */
					if (*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    rfa_status)) &
					    htole16(FXP_RFA_STATUS_CRC)) {
						m_freem(m);
						goto rcvloop;
					}

					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len =
					    total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m,
						    BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */
					ether_input_mbuf(ifp, m);
				}
				goto rcvloop;
			}
		}
		if (rnr) {
			/* Restart the receive unit at the current head. */
			rxmap = *((bus_dmamap_t *)
			    sc->rfa_headm->m_ext.ext_buf);
			fxp_scb_wait(sc);
			CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
				    rxmap->dm_segs[0].ds_addr +
				    RFA_ALIGNMENT_FUDGE);
			fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);

		}
	}
	return (claimed);
}

/*
 * Update packet in/out/collision statistics.
The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
void
fxp_stats_update(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_stats *sp = &sc->sc_ctrl->stats;
	int s;

	/* Harvest counters from the previous dump-stats DMA. */
	FXP_STATS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	ifp->if_opackets += letoh32(sp->tx_good);
	ifp->if_collisions += letoh32(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += letoh32(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else if (sc->sc_flags & FXPF_RECV_WORKAROUND)
		sc->rx_idle_secs++;
	ifp->if_ierrors +=
	    letoh32(sp->rx_crc_errors) +
	    letoh32(sp->rx_alignment_errors) +
	    letoh32(sp->rx_rnr_errors) +
	    letoh32(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += letoh32(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splnet();
	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_init(sc);
		splx(s);
		return;
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	FXP_STATS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (!(CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff)) {
		/*
		 * Start another stats dump.
		 */
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}

	/* Tick the MII clock. */
	mii_tick(&sc->sc_mii);

	splx(s);
	/*
	 * Schedule another timeout one second from now.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}

/*
 * Detach: stop the chip and release buffers, detach any PHYs and
 * media instances, and tear down the network interface.
 */
void
fxp_detach(struct fxp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Get rid of our timeouts and mbufs */
	fxp_stop(sc, 1, 1);

	/* Detach any PHYs we might have. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
1060 */ 1061 void 1062 fxp_stop(struct fxp_softc *sc, int drain, int softonly) 1063 { 1064 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1065 int i; 1066 1067 /* 1068 * Cancel stats updater. 1069 */ 1070 timeout_del(&sc->stats_update_to); 1071 1072 /* 1073 * Turn down interface (done early to avoid bad interactions 1074 * between panics, and the watchdog timer) 1075 */ 1076 ifp->if_timer = 0; 1077 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1078 1079 if (!softonly) 1080 mii_down(&sc->sc_mii); 1081 1082 /* 1083 * Issue software reset. 1084 */ 1085 if (!softonly) { 1086 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 1087 DELAY(10); 1088 } 1089 1090 /* 1091 * Release any xmit buffers. 1092 */ 1093 for (i = 0; i < FXP_NTXCB; i++) { 1094 if (sc->txs[i].tx_mbuf != NULL) { 1095 bus_dmamap_unload(sc->sc_dmat, sc->txs[i].tx_map); 1096 m_freem(sc->txs[i].tx_mbuf); 1097 sc->txs[i].tx_mbuf = NULL; 1098 } 1099 } 1100 sc->sc_cbt_cnt = 0; 1101 1102 if (drain) { 1103 bus_dmamap_t rxmap; 1104 struct mbuf *m; 1105 1106 /* 1107 * Free all the receive buffers then reallocate/reinitialize 1108 */ 1109 m = sc->rfa_headm; 1110 while (m != NULL) { 1111 rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf); 1112 bus_dmamap_unload(sc->sc_dmat, rxmap); 1113 FXP_RXMAP_PUT(sc, rxmap); 1114 m = m_free(m); 1115 sc->rx_bufs--; 1116 } 1117 sc->rfa_headm = NULL; 1118 sc->rfa_tailm = NULL; 1119 for (i = 0; i < FXP_NRFABUFS_MIN; i++) { 1120 if (fxp_add_rfabuf(sc, NULL) != 0) { 1121 /* 1122 * This "can't happen" - we're at splnet() 1123 * and we just freed all the buffers we need 1124 * above. 1125 */ 1126 panic("fxp_stop: no buffers!"); 1127 } 1128 sc->rx_bufs++; 1129 } 1130 } 1131 } 1132 1133 /* 1134 * Watchdog/transmission transmit timeout handler. Called when a 1135 * transmission is started on the interface, but no interrupt is 1136 * received before the timeout. This usually indicates that the 1137 * card has wedged for some reason. 
 */
void
fxp_watchdog(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	/* Full reinitialization recovers the wedged chip. */
	fxp_init(sc);
}

/*
 * Submit a command to the i82557.
 */
void
fxp_scb_cmd(struct fxp_softc *sc, u_int16_t cmd)
{
	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND, cmd);
}

/*
 * (Re)initialize the interface: stop pending I/O, download microcode,
 * send the configure and IAS (station address) commands, program the
 * multicast filter, build the TxCB ring and receive area (RFA), start
 * the CU, and arm the statistics updater.  Takes a void * so it can
 * be used directly as a callback argument.
 */
void
fxp_init(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txp;
	bus_dmamap_t rxmap;
	int i, prm, save_bf, lrxen, allm, s, bufs;

	s = splnet();

	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc, 0, 0);

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);

#ifndef SMALL_KERNEL
	fxp_load_ucode(sc);
#endif
	/* Once through to set flags (doit == 0: no command issued yet). */
	fxp_mc_setup(sc, 0);

	/*
	 * In order to support receiving 802.1Q VLAN frames, we have to
	 * enable "save bad frames", since they are 4 bytes larger than
	 * the normal Ethernet maximum frame length. On i82558 and later,
	 * we have a better mechanism for this.
	 */
	save_bf = 0;
	lrxen = 0;

	if (sc->sc_revision >= FXP_REV_82558_A4)
		lrxen = 1;
	else
		save_bf = 1;

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, stats));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);

	cbp = &sc->sc_ctrl->u.cfg;
	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, (void *)&cbp->cb_status,
	    sizeof(fxp_cb_config_template));

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
	allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

	/*
	 * The dead #if 0 block below is kept as documentation: it names
	 * each configuration field that the live byte/bit twiddling in
	 * the #else branch actually sets.
	 */
#if 0
	cbp->cb_status =	0;
	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
	cbp->link_addr =	0xffffffff;	/* (no) next command */
	cbp->byte_count =	22;		/* (22) bytes to config */
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_bce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->save_bf =		save_bf ? 1 : prm; /* save bad frames */
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->crscdt =		0;	/* (CRS only) */
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->long_rx =		lrxen;	/* (enable) long packets */
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		allm;
#else
	cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL);

	if (allm && !prm)
		cbp->mc_all |= 0x08;		/* accept all multicasts */
	else
		cbp->mc_all &= ~0x08;		/* reject all multicasts */

	if (prm) {
		cbp->promiscuous |= 1;		/* promiscuous mode */
		cbp->ctrl2 &= ~0x01;		/* save short packets */
		cbp->stripping &= ~0x01;	/* don't truncate rx packets */
	} else {
		cbp->promiscuous &= ~1;		/* no promiscuous mode */
		cbp->ctrl2 |= 0x01;		/* discard short packets */
		cbp->stripping |= 0x01;		/* truncate rx packets */
	}

	if (prm || save_bf)
		cbp->ctrl1 |= 0x80;		/* save bad frames */
	else
		cbp->ctrl1 &= ~0x80;		/* discard bad frames */

	if (sc->sc_flags & FXPF_MWI_ENABLE)
		cbp->ctrl0 |= 0x01;		/* enable PCI MWI command */

	if (!sc->phy_10Mbps_only)		/* interface mode */
		cbp->mediatype |= 0x01;
	else
		cbp->mediatype &= ~0x01;

	if (lrxen)				/* long packets */
		cbp->stripping |= 0x08;
	else
		cbp->stripping &= ~0x08;

	cbp->tx_dma_bytecount = 0;	/* (no) tx DMA max, dma_dce = 0 ??? */
	cbp->ctrl1 |= 0x08;		/* ci_int = 1 */
	cbp->ctrl3 |= 0x08;		/* nsai */
	cbp->fifo_limit = 0x08;		/* tx and rx fifo limit */
	cbp->fdx_pin |= 0x80;		/* Enable full duplex setting by pin */
#endif

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_CFG_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.cfg));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while ((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0 && i--);

	FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cbp->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: config command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Now initialize the station address.
	 */
	cb_ias = &sc->sc_ctrl->u.ias;
	cb_ias->cb_status = htole16(0);
	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
	cb_ias->link_addr = htole32(0xffffffff);
	bcopy(sc->sc_arpcom.ac_enaddr, (void *)cb_ias->macaddr,
	    sizeof(sc->sc_arpcom.ac_enaddr));

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_IAS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.ias));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C)) && i--);

	FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: IAS command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/* Again, this time really upload the multicast addresses */
	fxp_mc_setup(sc, 1);

	/*
	 * Initialize transmit control block (TxCB) list.
	 * Each TxCB is pre-linked to the next (circularly, via the
	 * FXP_TXCB_MASK wrap) so the ring never needs relinking.
	 */
	bzero(sc->sc_ctrl->tx_cb, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
	txp = sc->sc_ctrl->tx_cb;
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
		txp[i].link_addr = htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[(i + 1) & FXP_TXCB_MASK]));
		txp[i].tbd_array_addr = htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[i].tbd[0]));
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	sc->sc_cbt_prev = sc->sc_cbt_prod = sc->sc_cbt_cons = sc->txs;
	sc->sc_cbt_cnt = 1;
	sc->sc_ctrl->tx_cb[0].cb_command = htole16(FXP_CB_COMMAND_NOP |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map, 0,
	    sc->tx_cb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, tx_cb[0]));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 * Use the larger buffer count only when the interface is up;
	 * shrink or grow the current pool to match.
	 */
	if (ifp->if_flags & IFF_UP)
		bufs = FXP_NRFABUFS_MAX;
	else
		bufs = FXP_NRFABUFS_MIN;
	if (sc->rx_bufs > bufs) {
		while (sc->rfa_headm != NULL && sc->rx_bufs-- > bufs) {
			rxmap = *((bus_dmamap_t *)sc->rfa_headm->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			FXP_RXMAP_PUT(sc, rxmap);
			sc->rfa_headm = m_free(sc->rfa_headm);
		}
	} else if (sc->rx_bufs < bufs) {
		int err, tmp_rx_bufs = sc->rx_bufs;
		for (i = sc->rx_bufs; i < bufs; i++) {
			if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
				printf("%s: unable to create rx dma map %d, "
				    "error %d\n", sc->sc_dev.dv_xname, i, err);
				break;
			}
			sc->rx_bufs++;
		}
		for (i = tmp_rx_bufs; i < sc->rx_bufs; i++)
			if (fxp_add_rfabuf(sc, NULL) != 0)
				break;
	}
	fxp_scb_wait(sc);

	/*
	 * Set current media.
	 */
	mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Request a software generated interrupt that will be used to
	 * (re)start the RU processing.  If we direct the chip to start
	 * receiving from the start of queue now, instead of letting the
	 * interrupt handler first process all received packets, we run
	 * the risk of having it overwrite mbuf clusters while they are
	 * being processed or after they have been returned to the pool.
	 */
	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND,
	    CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) |
	    FXP_SCB_INTRCNTL_REQUEST_SWI);
	splx(s);

	/*
	 * Start stats updater.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}

/*
 * Change media according to request.
 */
int
fxp_mediachange(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	/* Reset all PHYs first if more than one media instance exists. */
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(&sc->sc_mii);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
void
fxp_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct fxp_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) on to the end of the list -
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 */
int
fxp_add_rfabuf(struct fxp_softc *sc, struct mbuf *oldm)
{
	u_int32_t v;
	struct mbuf *m;
	u_int8_t *rfap;
	bus_dmamap_t rxmap = NULL;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster available: recycle oldm if we have one. */
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
		}
		if (oldm == NULL) {
			/* Fresh buffer: take a map and stash its handle
			 * at the start of the cluster. */
			rxmap = FXP_RXMAP_GET(sc);
			*((bus_dmamap_t *)m->m_ext.ext_buf) = rxmap;
			bus_dmamap_load(sc->sc_dmat, rxmap,
			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
			    BUS_DMA_NOWAIT);
		} else if (oldm == m)
			/* Recycling oldm itself: its map is still loaded. */
			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
		else {
			/* New cluster replacing oldm: move its map over. */
			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			bus_dmamap_load(sc->sc_dmat, rxmap,
			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
			    BUS_DMA_NOWAIT);
			*mtod(m, bus_dmamap_t *) = rxmap;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rxmap = *mtod(m, bus_dmamap_t *);
	}

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfap = m->m_data;
	m->m_data += sizeof(struct fxp_rfa);
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, size)) =
	    htole16(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE);

	/*
	 * Initialize the rest of the RFA. Note that since the RFA
	 * is misaligned, we cannot store values directly. Instead,
	 * we use an optimized, inline copy.
	 */
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_status)) = 0;
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) =
	    htole16(FXP_RFA_CONTROL_EL);
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, actual_size)) = 0;

	/* All-ones link/rbd pointers; endian-neutral, so no swap needed. */
	v = -1;
	fxp_lwcopy(&v,
	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
	fxp_lwcopy(&v,
	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, rbd_addr)));

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, MCLBYTES,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->rfa_headm != NULL) {
		sc->rfa_tailm->m_next = m;
		v = htole32(rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE);
		rfap = sc->rfa_tailm->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
		fxp_lwcopy(&v,
		    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
		/* Clear EL on the old tail: it is no longer the last RFA. */
		*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) &=
		    htole16((u_int16_t)~FXP_RFA_CONTROL_EL);
		/* XXX we only need to sync the control struct */
		bus_dmamap_sync(sc->sc_dmat,
		    *((bus_dmamap_t *)sc->rfa_tailm->m_ext.ext_buf), 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else
		sc->rfa_headm = m;

	sc->rfa_tailm = m;

	/* 1 (failure) only when we fell back to recycling oldm. */
	return (m == oldm);
}

/*
 * Read a PHY register through the i82557 MDI control interface,
 * busy-waiting (up to FXP_CMD_TMO polls) for the ready bit.
 */
int
fxp_mdi_read(struct device *self, int phy, int reg)
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = FXP_CMD_TMO;
	int value;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	/* Poll until the ready bit (bit 28) asserts or we run out. */
	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);

	return (value & 0xffff);
}

/*
 * MII status change callback.
 */
void
fxp_statchg(struct device *self)
{
	/* Nothing to do. */
}

/*
 * Write a PHY register through the i82557 MDI control interface,
 * busy-waiting for the ready bit as in fxp_mdi_read().
 */
void
fxp_mdi_write(struct device *self, int phy, int reg, int value)
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = FXP_CMD_TMO;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
}

/*
 * Handle interface ioctls: address/flags/media changes.  Runs at
 * splnet(); unknown commands are passed on to ether_ioctl().
 */
int
fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			fxp_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				fxp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fxp_stop(sc, 1, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/* ENETRESET from above or ether_ioctl(): reprogram the chip. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			fxp_init(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this. By requiring this, it allows us to keep up the performance of
 * the pre-initialized command ring (esp. link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called at splnet.
 */
/*
 * "doit" == 0 only recomputes IFF_ALLMULTI and fills the address list;
 * no command is issued to the chip (used early in fxp_init() before the
 * configure command, which reads the flags).
 */
void
fxp_mc_setup(struct fxp_softc *sc, int doit)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct fxp_cb_mcs *mcsp = &sc->sc_ctrl->u.mcs;
	struct ether_multistep step;
	struct ether_multi *enm;
	int i, nmcasts = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Fall back to ALLMULTI when promiscuous, when address ranges
	 * are in use, or when the list exceeds the hardware limit.
	 */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= MAXMCADDR) {
		ifp->if_flags |= IFF_ALLMULTI;
	} else {
		ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo,
			    (void *)&mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);

			nmcasts++;

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	if (doit == 0)
		return;

	/*
	 * Initialize multicast setup descriptor.
	 */
	mcsp->cb_status = htole16(0);
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
	mcsp->link_addr = htole32(-1);
	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	for (i = FXP_CMD_TMO; (CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE && i--; DELAY(1));

	if ((CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE) {
		printf("%s: timeout waiting for CU ready\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	FXP_MCS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.mcs));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C)) && i--);

	FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: multicast command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

}

#ifndef SMALL_KERNEL
#include <dev/microcode/fxp/rcvbundl.h>
/*
 * Per-revision microcode descriptors.  The *_offset fields are dword
 * indices into the ucode image where tunable parameters are patched
 * in before download; 0 means the parameter does not exist for that
 * image.  The table is terminated by a zero revision.
 */
struct ucode {
	u_int16_t	revision;
	u_int16_t	int_delay_offset;
	u_int16_t	bundle_max_offset;
	u_int16_t	min_size_mask_offset;
	const char	*uname;
} const ucode_table[] = {
	{ FXP_REV_82558_A4, D101_CPUSAVER_DWORD,
	  0, 0,
	  "fxp-d101a" },

	{ FXP_REV_82558_B0, D101_CPUSAVER_DWORD,
	  0, 0,
	  "fxp-d101b0" },

	{ FXP_REV_82559_A0, D101M_CPUSAVER_DWORD,
	  D101M_CPUSAVER_BUNDLE_MAX_DWORD, D101M_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d101ma" },

	{ FXP_REV_82559S_A, D101S_CPUSAVER_DWORD,
	  D101S_CPUSAVER_BUNDLE_MAX_DWORD, D101S_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d101s" },

	{ FXP_REV_82550, D102_B_CPUSAVER_DWORD,
	  D102_B_CPUSAVER_BUNDLE_MAX_DWORD, D102_B_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102" },

	{ FXP_REV_82550_C, D102_C_CPUSAVER_DWORD,
	  D102_C_CPUSAVER_BUNDLE_MAX_DWORD, D102_C_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102c" },

	{ FXP_REV_82551_F, D102_E_CPUSAVER_DWORD,
	  D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102e" },

	{ FXP_REV_82551_10, D102_E_CPUSAVER_DWORD,
	  D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102e" },

	{ 0, 0,
	  0, 0,
	  NULL }
};

/*
 * Download interrupt-mitigation ("CPU saver") microcode to the chip.
 * No-op when already loaded (FXPF_UCODE set), when the chip revision
 * has no table entry, or when the firmware image cannot be read.
 * FXPF_UCODE is also set on a firmware read error so we don't retry
 * on every fxp_init().
 */
void
fxp_load_ucode(struct fxp_softc *sc)
{
	const struct ucode *uc;
	struct fxp_cb_ucode *cbp = &sc->sc_ctrl->u.code;
	int i, error;
	u_int32_t *ucode_buf;
	size_t ucode_len;

	if (sc->sc_flags & FXPF_UCODE)
		return;

	for (uc = ucode_table; uc->revision != 0; uc++)
		if (sc->sc_revision == uc->revision)
			break;
	if (uc->revision == 0)
		return;	/* no ucode for this chip is found */

	error = loadfirmware(uc->uname, (u_char **)&ucode_buf, &ucode_len);
	if (error) {
		printf("%s: error %d, could not read firmware %s\n",
		    sc->sc_dev.dv_xname, error, uc->uname);
		sc->sc_flags |= FXPF_UCODE;
		return;
	}

	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE|FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	for (i = 0; i < (ucode_len / sizeof(u_int32_t)); i++)
		cbp->ucode[i] = ucode_buf[i];

	/* Patch the tunables into the image at the per-revision offsets. */
	if (uc->int_delay_offset)
		*((u_int16_t *)&cbp->ucode[uc->int_delay_offset]) =
		    htole16(sc->sc_int_delay + sc->sc_int_delay / 2);

	if (uc->bundle_max_offset)
		*((u_int16_t *)&cbp->ucode[uc->bundle_max_offset]) =
		    htole16(sc->sc_bundle_max);

	if (uc->min_size_mask_offset)
		*((u_int16_t *)&cbp->ucode[uc->min_size_mask_offset]) =
		    htole16(sc->sc_min_size_mask);

	FXP_UCODE_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Download the ucode to the chip.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr
	    + offsetof(struct fxp_ctrl, u.code));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(2);
		FXP_UCODE_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0) && --i);
	if (i == 0) {
		printf("%s: timeout loading microcode\n", sc->sc_dev.dv_xname);
		free(ucode_buf, M_DEVBUF);
		return;
	}

#ifdef DEBUG
	printf("%s: microcode loaded, int_delay: %d usec",
	    sc->sc_dev.dv_xname, sc->sc_int_delay);

	if (uc->bundle_max_offset)
		printf(", bundle_max %d\n", sc->sc_bundle_max);
	else
		printf("\n");
#endif

	free(ucode_buf, M_DEVBUF);
	sc->sc_flags |= FXPF_UCODE;
}
#endif /* SMALL_KERNEL */