1 /* $NetBSD: if_ste.c,v 1.15 2002/10/21 23:38:11 fair Exp $ */ 2 3 /*- 4 * Copyright (c) 2001 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /* 40 * Device driver for the Sundance Tech. ST-201 10/100 41 * Ethernet controller. 42 */ 43 44 #include <sys/cdefs.h> 45 __KERNEL_RCSID(0, "$NetBSD: if_ste.c,v 1.15 2002/10/21 23:38:11 fair Exp $"); 46 47 #include "bpfilter.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/callout.h> 52 #include <sys/mbuf.h> 53 #include <sys/malloc.h> 54 #include <sys/kernel.h> 55 #include <sys/socket.h> 56 #include <sys/ioctl.h> 57 #include <sys/errno.h> 58 #include <sys/device.h> 59 #include <sys/queue.h> 60 61 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */ 62 63 #include <net/if.h> 64 #include <net/if_dl.h> 65 #include <net/if_media.h> 66 #include <net/if_ether.h> 67 68 #if NBPFILTER > 0 69 #include <net/bpf.h> 70 #endif 71 72 #include <machine/bus.h> 73 #include <machine/intr.h> 74 75 #include <dev/mii/mii.h> 76 #include <dev/mii/miivar.h> 77 #include <dev/mii/mii_bitbang.h> 78 79 #include <dev/pci/pcireg.h> 80 #include <dev/pci/pcivar.h> 81 #include <dev/pci/pcidevs.h> 82 83 #include <dev/pci/if_stereg.h> 84 85 /* 86 * Transmit descriptor list size. 87 */ 88 #define STE_NTXDESC 256 89 #define STE_NTXDESC_MASK (STE_NTXDESC - 1) 90 #define STE_NEXTTX(x) (((x) + 1) & STE_NTXDESC_MASK) 91 92 /* 93 * Receive descriptor list size. 
94 */ 95 #define STE_NRXDESC 128 96 #define STE_NRXDESC_MASK (STE_NRXDESC - 1) 97 #define STE_NEXTRX(x) (((x) + 1) & STE_NRXDESC_MASK) 98 99 /* 100 * Control structures are DMA'd to the ST-201 chip. We allocate them in 101 * a single clump that maps to a single DMA segment to make several things 102 * easier. 103 */ 104 struct ste_control_data { 105 /* 106 * The transmit descriptors. 107 */ 108 struct ste_tfd scd_txdescs[STE_NTXDESC]; 109 110 /* 111 * The receive descriptors. 112 */ 113 struct ste_rfd scd_rxdescs[STE_NRXDESC]; 114 }; 115 116 #define STE_CDOFF(x) offsetof(struct ste_control_data, x) 117 #define STE_CDTXOFF(x) STE_CDOFF(scd_txdescs[(x)]) 118 #define STE_CDRXOFF(x) STE_CDOFF(scd_rxdescs[(x)]) 119 120 /* 121 * Software state for transmit and receive jobs. 122 */ 123 struct ste_descsoft { 124 struct mbuf *ds_mbuf; /* head of our mbuf chain */ 125 bus_dmamap_t ds_dmamap; /* our DMA map */ 126 }; 127 128 /* 129 * Software state per device. 130 */ 131 struct ste_softc { 132 struct device sc_dev; /* generic device information */ 133 bus_space_tag_t sc_st; /* bus space tag */ 134 bus_space_handle_t sc_sh; /* bus space handle */ 135 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 136 struct ethercom sc_ethercom; /* ethernet common data */ 137 void *sc_sdhook; /* shutdown hook */ 138 139 void *sc_ih; /* interrupt cookie */ 140 141 struct mii_data sc_mii; /* MII/media information */ 142 143 struct callout sc_tick_ch; /* tick callout */ 144 145 bus_dmamap_t sc_cddmamap; /* control data DMA map */ 146 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 147 148 /* 149 * Software state for transmit and receive descriptors. 150 */ 151 struct ste_descsoft sc_txsoft[STE_NTXDESC]; 152 struct ste_descsoft sc_rxsoft[STE_NRXDESC]; 153 154 /* 155 * Control data structures. 
156 */ 157 struct ste_control_data *sc_control_data; 158 #define sc_txdescs sc_control_data->scd_txdescs 159 #define sc_rxdescs sc_control_data->scd_rxdescs 160 161 int sc_txpending; /* number of Tx requests pending */ 162 int sc_txdirty; /* first dirty Tx descriptor */ 163 int sc_txlast; /* last used Tx descriptor */ 164 165 int sc_rxptr; /* next ready Rx descriptor/descsoft */ 166 167 int sc_txthresh; /* Tx threshold */ 168 uint32_t sc_DMACtrl; /* prototype DMACtrl register */ 169 uint16_t sc_IntEnable; /* prototype IntEnable register */ 170 uint16_t sc_MacCtrl0; /* prototype MacCtrl0 register */ 171 uint8_t sc_ReceiveMode; /* prototype ReceiveMode register */ 172 }; 173 174 #define STE_CDTXADDR(sc, x) ((sc)->sc_cddma + STE_CDTXOFF((x))) 175 #define STE_CDRXADDR(sc, x) ((sc)->sc_cddma + STE_CDRXOFF((x))) 176 177 #define STE_CDTXSYNC(sc, x, ops) \ 178 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 179 STE_CDTXOFF((x)), sizeof(struct ste_tfd), (ops)) 180 181 #define STE_CDRXSYNC(sc, x, ops) \ 182 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 183 STE_CDRXOFF((x)), sizeof(struct ste_rfd), (ops)) 184 185 #define STE_INIT_RXDESC(sc, x) \ 186 do { \ 187 struct ste_descsoft *__ds = &(sc)->sc_rxsoft[(x)]; \ 188 struct ste_rfd *__rfd = &(sc)->sc_rxdescs[(x)]; \ 189 struct mbuf *__m = __ds->ds_mbuf; \ 190 \ 191 /* \ 192 * Note: We scoot the packet forward 2 bytes in the buffer \ 193 * so that the payload after the Ethernet header is aligned \ 194 * to a 4-byte boundary. 
\ 195 */ \ 196 __m->m_data = __m->m_ext.ext_buf + 2; \ 197 __rfd->rfd_frag.frag_addr = \ 198 htole32(__ds->ds_dmamap->dm_segs[0].ds_addr + 2); \ 199 __rfd->rfd_frag.frag_len = htole32((MCLBYTES - 2) | FRAG_LAST); \ 200 __rfd->rfd_next = htole32(STE_CDRXADDR((sc), STE_NEXTRX((x)))); \ 201 __rfd->rfd_status = 0; \ 202 STE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 203 } while (/*CONSTCOND*/0) 204 205 #define STE_TIMEOUT 1000 206 207 void ste_start(struct ifnet *); 208 void ste_watchdog(struct ifnet *); 209 int ste_ioctl(struct ifnet *, u_long, caddr_t); 210 int ste_init(struct ifnet *); 211 void ste_stop(struct ifnet *, int); 212 213 void ste_shutdown(void *); 214 215 void ste_reset(struct ste_softc *, u_int32_t); 216 void ste_setthresh(struct ste_softc *); 217 void ste_txrestart(struct ste_softc *, u_int8_t); 218 void ste_rxdrain(struct ste_softc *); 219 int ste_add_rxbuf(struct ste_softc *, int); 220 void ste_read_eeprom(struct ste_softc *, int, uint16_t *); 221 void ste_tick(void *); 222 223 void ste_stats_update(struct ste_softc *); 224 225 void ste_set_filter(struct ste_softc *); 226 227 int ste_intr(void *); 228 void ste_txintr(struct ste_softc *); 229 void ste_rxintr(struct ste_softc *); 230 231 int ste_mii_readreg(struct device *, int, int); 232 void ste_mii_writereg(struct device *, int, int, int); 233 void ste_mii_statchg(struct device *); 234 235 int ste_mediachange(struct ifnet *); 236 void ste_mediastatus(struct ifnet *, struct ifmediareq *); 237 238 int ste_match(struct device *, struct cfdata *, void *); 239 void ste_attach(struct device *, struct device *, void *); 240 241 int ste_copy_small = 0; 242 243 CFATTACH_DECL(ste, sizeof(struct ste_softc), 244 ste_match, ste_attach, NULL, NULL); 245 246 uint32_t ste_mii_bitbang_read(struct device *); 247 void ste_mii_bitbang_write(struct device *, uint32_t); 248 249 const struct mii_bitbang_ops ste_mii_bitbang_ops = { 250 ste_mii_bitbang_read, 251 ste_mii_bitbang_write, 252 { 253 
	PC_MgmtData,		/* MII_BIT_MDO */
	PC_MgmtData,		/* MII_BIT_MDI */
	PC_MgmtClk,		/* MII_BIT_MDC */
	PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
	0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

/*
 * Devices supported by this driver.  The table is terminated by an
 * entry with a NULL name.
 */
const struct ste_product {
	pci_vendor_id_t		ste_vendor;
	pci_product_id_t	ste_product;
	const char		*ste_name;
} ste_products[] = {
	{ PCI_VENDOR_SUNDANCETI,	PCI_PRODUCT_SUNDANCETI_ST201,
	  "Sundance ST-201 10/100 Ethernet" },

	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DL1002,
	  "D-Link DL-1002 10/100 Ethernet" },

	{ 0,				0,
	  NULL },
};

/*
 * ste_lookup:
 *
 *	Return the ste_products entry matching the PCI vendor/product
 *	ID of the attach arguments, or NULL if the device is not one
 *	we support.
 */
static const struct ste_product *
ste_lookup(const struct pci_attach_args *pa)
{
	const struct ste_product *sp;

	for (sp = ste_products; sp->ste_name != NULL; sp++) {
		if (PCI_VENDOR(pa->pa_id) == sp->ste_vendor &&
		    PCI_PRODUCT(pa->pa_id) == sp->ste_product)
			return (sp);
	}
	return (NULL);
}

/*
 * ste_match:		[autoconf match function]
 *
 *	Returns non-zero if we support the device described by `aux'.
 */
int
ste_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (ste_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * ste_attach:		[autoconf attach function]
 *
 *	Attach an ST-201 interface: map registers, establish the
 *	interrupt, allocate and map the DMA control data, create the
 *	Tx/Rx DMA maps, read the station address from the EEPROM,
 *	probe the MII, and attach the network interface.
 *
 *	On any unrecoverable error the routine undoes whatever it had
 *	already allocated (the fail_* ladder at the bottom) and returns
 *	without attaching.
 */
void
ste_attach(struct device *parent, struct device *self, void *aux)
{
	struct ste_softc *sc = (struct ste_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	const struct ste_product *sp;
	pcireg_t pmode;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2];
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	/* We matched in ste_match(), so this cannot fail. */
	sp = ste_lookup(pa);
	if (sp == NULL) {
		printf("\n");
		panic("ste_attach: impossible");
	}

	printf(": %s\n", sp->ste_name);

	/*
	 * Map the device.  Try both I/O and memory space; prefer
	 * memory space if both mappings succeed.
	 */
	ioh_valid = (pci_mapreg_map(pa, STE_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, STE_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Enable bus mastering. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		/* Low two bits of the PMCSR are the power state. */
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake up from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (pmode != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  The control data (Tx and Rx descriptor rings)
	 * lives in a single DMA-coherent segment.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ste_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct ste_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ste_control_data), 1,
	    sizeof(struct ste_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ste_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.  Each Tx packet may use
	 * up to STE_NTXFRAGS segments.
	 */
	for (i = 0; i < STE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    STE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.  Rx buffers are single
	 * mbuf clusters, so one segment each.
	 */
	for (i = 0; i < STE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ste_reset(sc, AC_GlobalReset | AC_RxReset | AC_TxReset | AC_DMA |
	    AC_FIFO | AC_Network | AC_Host | AC_AutoInit | AC_RstOut);

	/*
	 * Read the Ethernet address from the EEPROM, one 16-bit word
	 * at a time.
	 *
	 * NOTE(review): the le16toh() followed by memcpy() assumes the
	 * host-order uint16_t lays the two address bytes out in the
	 * right order; this looks correct only on little-endian hosts
	 * unless ste_read_eeprom() already compensates — verify against
	 * ste_read_eeprom() and a big-endian machine.
	 */
	for (i = 0; i < 3; i++) {
		ste_read_eeprom(sc, STE_EEPROM_StationAddress0 + i, &myea[i]);
		myea[i] = le16toh(myea[i]);
	}
	memcpy(enaddr, myea, sizeof(enaddr));

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.  If no
	 * PHY is found, fall back to a manual "none" media setting.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ste_mii_readreg;
	sc->sc_mii.mii_writereg = ste_mii_writereg;
	sc->sc_mii.mii_statchg = ste_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ste_mediachange,
	    ste_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_init = ste_init;
	ifp->if_stop = ste_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Default the transmit threshold to 128 bytes.
	 */
	sc->sc_txthresh = 128;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
		sc->sc_DMACtrl |= DC_MWIDisable;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ste_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < STE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < STE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct ste_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * ste_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
ste_shutdown(void *arg)
{
	struct ste_softc *sc = arg;

	ste_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * ste_dmahalt_wait:
 *
 *	Spin (up to STE_TIMEOUT * 2us) waiting for the DMA engine to
 *	acknowledge a halt request; complain if it never does.
 */
static void
ste_dmahalt_wait(struct ste_softc *sc)
{
	int i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		delay(2);
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, STE_DMACtrl) &
		     DC_DMAHaltBusy) == 0)
			break;
	}

	if (i == STE_TIMEOUT)
		printf("%s: DMA halt timed out\n", sc->sc_dev.dv_xname);
}

/*
 * ste_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
601 */ 602 void 603 ste_start(struct ifnet *ifp) 604 { 605 struct ste_softc *sc = ifp->if_softc; 606 struct mbuf *m0, *m; 607 struct ste_descsoft *ds; 608 struct ste_tfd *tfd; 609 bus_dmamap_t dmamap; 610 int error, olasttx, nexttx, opending, seg, totlen; 611 612 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 613 return; 614 615 /* 616 * Remember the previous number of pending transmissions 617 * and the current last descriptor in the list. 618 */ 619 opending = sc->sc_txpending; 620 olasttx = sc->sc_txlast; 621 622 /* 623 * Loop through the send queue, setting up transmit descriptors 624 * until we drain the queue, or use up all available transmit 625 * descriptors. 626 */ 627 while (sc->sc_txpending < STE_NTXDESC) { 628 /* 629 * Grab a packet off the queue. 630 */ 631 IFQ_POLL(&ifp->if_snd, m0); 632 if (m0 == NULL) 633 break; 634 m = NULL; 635 636 /* 637 * Get the last and next available transmit descriptor. 638 */ 639 nexttx = STE_NEXTTX(sc->sc_txlast); 640 tfd = &sc->sc_txdescs[nexttx]; 641 ds = &sc->sc_txsoft[nexttx]; 642 643 dmamap = ds->ds_dmamap; 644 645 /* 646 * Load the DMA map. If this fails, the packet either 647 * didn't fit in the alloted number of segments, or we 648 * were short on resources. In this case, we'll copy 649 * and try again. 
650 */ 651 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 652 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { 653 MGETHDR(m, M_DONTWAIT, MT_DATA); 654 if (m == NULL) { 655 printf("%s: unable to allocate Tx mbuf\n", 656 sc->sc_dev.dv_xname); 657 break; 658 } 659 if (m0->m_pkthdr.len > MHLEN) { 660 MCLGET(m, M_DONTWAIT); 661 if ((m->m_flags & M_EXT) == 0) { 662 printf("%s: unable to allocate Tx " 663 "cluster\n", sc->sc_dev.dv_xname); 664 m_freem(m); 665 break; 666 } 667 } 668 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t)); 669 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 670 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, 671 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); 672 if (error) { 673 printf("%s: unable to load Tx buffer, " 674 "error = %d\n", sc->sc_dev.dv_xname, error); 675 break; 676 } 677 } 678 679 IFQ_DEQUEUE(&ifp->if_snd, m0); 680 if (m != NULL) { 681 m_freem(m0); 682 m0 = m; 683 } 684 685 /* 686 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 687 */ 688 689 /* Sync the DMA map. */ 690 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 691 BUS_DMASYNC_PREWRITE); 692 693 /* Initialize the fragment list. */ 694 for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) { 695 tfd->tfd_frags[seg].frag_addr = 696 htole32(dmamap->dm_segs[seg].ds_addr); 697 tfd->tfd_frags[seg].frag_len = 698 htole32(dmamap->dm_segs[seg].ds_len); 699 totlen += dmamap->dm_segs[seg].ds_len; 700 } 701 tfd->tfd_frags[seg - 1].frag_len |= htole32(FRAG_LAST); 702 703 /* Initialize the descriptor. */ 704 tfd->tfd_next = htole32(STE_CDTXADDR(sc, nexttx)); 705 tfd->tfd_control = htole32(TFD_FrameId(nexttx) | (totlen & 3)); 706 707 /* Sync the descriptor. */ 708 STE_CDTXSYNC(sc, nexttx, 709 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 710 711 /* 712 * Store a pointer to the packet so we can free it later, 713 * and remember what txdirty will be once the packet is 714 * done. 715 */ 716 ds->ds_mbuf = m0; 717 718 /* Advance the tx pointer. 
*/ 719 sc->sc_txpending++; 720 sc->sc_txlast = nexttx; 721 722 #if NBPFILTER > 0 723 /* 724 * Pass the packet to any BPF listeners. 725 */ 726 if (ifp->if_bpf) 727 bpf_mtap(ifp->if_bpf, m0); 728 #endif /* NBPFILTER > 0 */ 729 } 730 731 if (sc->sc_txpending == STE_NTXDESC) { 732 /* No more slots left; notify upper layer. */ 733 ifp->if_flags |= IFF_OACTIVE; 734 } 735 736 if (sc->sc_txpending != opending) { 737 /* 738 * We enqueued packets. If the transmitter was idle, 739 * reset the txdirty pointer. 740 */ 741 if (opending == 0) 742 sc->sc_txdirty = STE_NEXTTX(olasttx); 743 744 /* 745 * Cause a descriptor interrupt to happen on the 746 * last packet we enqueued, and also cause the 747 * DMA engine to wait after is has finished processing 748 * it. 749 */ 750 sc->sc_txdescs[sc->sc_txlast].tfd_next = 0; 751 sc->sc_txdescs[sc->sc_txlast].tfd_control |= 752 htole32(TFD_TxDMAIndicate); 753 STE_CDTXSYNC(sc, sc->sc_txlast, 754 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 755 756 /* 757 * Link up the new chain of descriptors to the 758 * last. 759 */ 760 sc->sc_txdescs[olasttx].tfd_next = 761 STE_CDTXADDR(sc, STE_NEXTTX(olasttx)); 762 STE_CDTXSYNC(sc, olasttx, 763 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 764 765 /* 766 * Kick the transmit DMA logic. Note that since we're 767 * using auto-polling, reading the Tx desc pointer will 768 * give it the nudge it needs to get going. 769 */ 770 if (bus_space_read_4(sc->sc_st, sc->sc_sh, 771 STE_TxDMAListPtr) == 0) { 772 bus_space_write_4(sc->sc_st, sc->sc_sh, 773 STE_DMACtrl, DC_TxDMAHalt); 774 ste_dmahalt_wait(sc); 775 bus_space_write_4(sc->sc_st, sc->sc_sh, 776 STE_TxDMAListPtr, 777 STE_CDTXADDR(sc, STE_NEXTTX(olasttx))); 778 bus_space_write_4(sc->sc_st, sc->sc_sh, 779 STE_DMACtrl, DC_TxDMAResume); 780 } 781 782 /* Set a watchdog timer in case the chip flakes out. */ 783 ifp->if_timer = 5; 784 } 785 } 786 787 /* 788 * ste_watchdog: [ifnet interface function] 789 * 790 * Watchdog timer handler. 
 */
void
ste_watchdog(struct ifnet *ifp)
{
	struct ste_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	/* Reinitialize the chip and restart transmission. */
	(void) ste_init(ifp);

	/* Try to get more packets going. */
	ste_start(ifp);
}

/*
 * ste_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
ste_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ste_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ste_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	ste_start(ifp);

	splx(s);
	return (error);
}

/*
 * ste_intr:
 *
 *	Interrupt service routine.  Loops acknowledging and servicing
 *	interrupt causes until none we care about remain, then
 *	re-enables interrupts (reading IntStatusAck clears/acks them).
 *	Returns non-zero if the interrupt was ours.
 */
int
ste_intr(void *arg)
{
	struct ste_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	uint8_t txstat;
	int wantinit;

	/* Not our interrupt?  Bail out without touching anything. */
	if ((bus_space_read_2(sc->sc_st, sc->sc_sh, STE_IntStatus) &
	     IS_InterruptStatus) == 0)
		return (0);

	/* Loop until a fatal condition requests a reinit, or we're done. */
	for (wantinit = 0; wantinit == 0;) {
		isr = bus_space_read_2(sc->sc_st, sc->sc_sh, STE_IntStatusAck);
		if ((isr & sc->sc_IntEnable) == 0)
			break;

		/* Receive interrupts. */
		if (isr & IE_RxDMAComplete)
			ste_rxintr(sc);

		/* Transmit interrupts. */
		if (isr & (IE_TxDMAComplete|IE_TxComplete))
			ste_txintr(sc);

		/* Statistics overflow. */
		if (isr & IE_UpdateStats)
			ste_stats_update(sc);

		/* Transmission errors: drain the TxStatus FIFO. */
		if (isr & IE_TxComplete) {
			for (;;) {
				txstat = bus_space_read_1(sc->sc_st, sc->sc_sh,
				    STE_TxStatus);
				if ((txstat & TS_TxComplete) == 0)
					break;
				if (txstat & TS_TxUnderrun) {
					/*
					 * Bump the Tx start threshold (capped
					 * at 0x1ffc), reset the Tx side, and
					 * restart the ring at the frame that
					 * underran.
					 */
					sc->sc_txthresh += 32;
					if (sc->sc_txthresh > 0x1ffc)
						sc->sc_txthresh = 0x1ffc;
					printf("%s: transmit underrun, new "
					    "threshold: %d bytes\n",
					    sc->sc_dev.dv_xname,
					    sc->sc_txthresh);
					ste_reset(sc, AC_TxReset | AC_DMA |
					    AC_FIFO | AC_Network);
					ste_setthresh(sc);
					bus_space_write_1(sc->sc_st, sc->sc_sh,
					    STE_TxDMAPollPeriod, 127);
					ste_txrestart(sc,
					    bus_space_read_1(sc->sc_st,
					        sc->sc_sh, STE_TxFrameId));
				}
				if (txstat & TS_TxReleaseError) {
					printf("%s: Tx FIFO release error\n",
					    sc->sc_dev.dv_xname);
					wantinit = 1;
				}
				if (txstat & TS_MaxCollisions) {
					printf("%s: excessive collisions\n",
					    sc->sc_dev.dv_xname);
					wantinit = 1;
				}
				if (txstat & TS_TxStatusOverflow) {
					printf("%s: status overflow\n",
					    sc->sc_dev.dv_xname);
					wantinit = 1;
				}
				/* Ack this TxStatus FIFO entry. */
				bus_space_write_2(sc->sc_st, sc->sc_sh,
				    STE_TxStatus, 0);
			}
		}

		/* Host interface errors. */
		if (isr & IE_HostError) {
			printf("%s: Host interface error\n",
			    sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	/* A fatal error was seen; reinitialize the chip. */
	if (wantinit)
		ste_init(ifp);

	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_IntEnable,
	    sc->sc_IntEnable);

	/* Try to get more packets going. */
	ste_start(ifp);

	return (1);
}

/*
 * ste_txintr:
 *
 *	Helper; handle transmit interrupts.  Reclaims completed Tx
 *	descriptors, frees their mbufs, and clears IFF_OACTIVE so the
 *	upper layer may queue more packets.
 */
void
ste_txintr(struct ste_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ste_descsoft *ds;
	uint32_t control;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = STE_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		STE_CDTXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Stop at the first descriptor the chip hasn't finished. */
		control = le32toh(sc->sc_txdescs[i].tfd_control);
		if ((control & TFD_TxDMAComplete) == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
}

/*
 * ste_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the Rx ring from
 *	sc_rxptr, passing completed frames up the stack and recycling
 *	errored or unallocatable buffers in place.
 */
void
ste_rxintr(struct ste_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ste_descsoft *ds;
	struct mbuf *m;
	uint32_t status;
	int i, len;

	for (i = sc->sc_rxptr;; i = STE_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		STE_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_rxdescs[i].rfd_status);

		/* Stop at the first descriptor the chip hasn't filled. */
		if ((status & RFD_RxDMAComplete) == 0)
			break;

		/*
		 * If the packet had an error, simply recycle the
		 * buffer.  Note, we count the error later in the
		 * periodic stats update.
		 */
		if (status & RFD_RxFrameError) {
			STE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, we have
		 * configured the chip to not include the CRC at
		 * the end of the packet.
		 */
		len = RFD_RxDMAFrameLen(status);

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (ste_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			/* Offset by 2 so the payload stays 4-byte aligned. */
			m->m_data += 2;
			memcpy(mtod(m, caddr_t),
			    mtod(ds->ds_mbuf, caddr_t), len);
			STE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (ste_add_rxbuf(sc, i) != 0) {
 dropit:
				/* Out of mbufs: drop and recycle in place. */
				ifp->if_ierrors++;
				STE_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass if up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * ste_tick:
 *
 *	One second timer, used to tick the MII and refresh the
 *	statistics counters; reschedules itself.
 */
void
ste_tick(void *arg)
{
	struct ste_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	ste_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, ste_tick, sc);
}

/*
 * ste_stats_update:
 *
 *	Read the ST-201 statistics counters.
 *	The counters are clear-on-read, so every register is read each
 *	time even when the value is discarded, to keep them from
 *	overflowing.
 */
void
ste_stats_update(struct ste_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;

	/* Octet counters: read (to clear) and discard. */
	(void) bus_space_read_2(st, sh, STE_OctetsReceivedOk0);
	(void) bus_space_read_2(st, sh, STE_OctetsReceivedOk1);

	(void) bus_space_read_2(st, sh, STE_OctetsTransmittedOk0);
	(void) bus_space_read_2(st, sh, STE_OctetsTransmittedOk1);

	ifp->if_opackets +=
	    (u_int) bus_space_read_2(st, sh, STE_FramesTransmittedOK);
	ifp->if_ipackets +=
	    (u_int) bus_space_read_2(st, sh, STE_FramesReceivedOK);

	ifp->if_collisions +=
	    (u_int) bus_space_read_1(st, sh, STE_LateCollisions) +
	    (u_int) bus_space_read_1(st, sh, STE_MultipleColFrames) +
	    (u_int) bus_space_read_1(st, sh, STE_SingleColFrames);

	(void) bus_space_read_1(st, sh, STE_FramesWDeferredXmt);

	ifp->if_ierrors +=
	    (u_int) bus_space_read_1(st, sh, STE_FramesLostRxErrors);

	ifp->if_oerrors +=
	    (u_int) bus_space_read_1(st, sh, STE_FramesWExDeferral) +
	    (u_int) bus_space_read_1(st, sh, STE_FramesXbortXSColls) +
	    bus_space_read_1(st, sh, STE_CarrierSenseErrors);

	/* Broadcast/multicast counters: read (to clear) and discard. */
	(void) bus_space_read_1(st, sh, STE_BcstFramesXmtdOk);
	(void) bus_space_read_1(st, sh, STE_BcstFramesRcvdOk);
	(void) bus_space_read_1(st, sh, STE_McstFramesXmtdOk);
	(void) bus_space_read_1(st, sh, STE_McstFramesRcvdOk);
}

/*
 * ste_reset:
 *
 *	Perform a soft reset on the ST-201.
 */
void
ste_reset(struct ste_softc *sc, u_int32_t rstbits)
{
	uint32_t ac;
	int i;

	/* OR the requested reset bits into the current AsicCtrl value. */
	ac = bus_space_read_4(sc->sc_st, sc->sc_sh, STE_AsicCtrl);

	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_AsicCtrl, ac | rstbits);

	/* Give the chip time to begin the reset before polling. */
	delay(50000);

	/* Poll (1ms steps, up to STE_TIMEOUT iterations) for completion. */
	for (i = 0; i < STE_TIMEOUT; i++) {
		delay(1000);
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, STE_AsicCtrl) &
		    AC_ResetBusy) == 0)
			break;
	}

	if (i == STE_TIMEOUT)
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);

	/* Settle time after the reset deasserts. */
	delay(1000);
}

/*
 * ste_setthresh:
 *
 *	Set the various transmit threshold registers.
 */
void
ste_setthresh(struct ste_softc *sc)
{
	/* Set the TX start threshold. */
	bus_space_write_2(sc->sc_st, sc->sc_sh,
	    STE_TxStartThresh, sc->sc_txthresh);
	/*
	 * Urgent threshold: set to sc_txthresh / 2.
	 * (The >> 6 suggests the register counts in 32-byte units --
	 * TODO confirm against the ST-201 datasheet.)
	 */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_TxDMAUrgentThresh,
	    sc->sc_txthresh >> 6);
	/* Burst threshold: use default value (256 bytes) */
}

/*
 * Restart TX at the given frame ID in the transmitter ring.
 */

void
ste_txrestart(struct ste_softc *sc, u_int8_t id)
{
	u_int32_t control;

	/*
	 * Clear the DMA-complete bit in the descriptor so the chip
	 * will process it again (descriptors are little-endian).
	 */
	STE_CDTXSYNC(sc, id, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	control = le32toh(sc->sc_txdescs[id].tfd_control);
	control &= ~TFD_TxDMAComplete;
	sc->sc_txdescs[id].tfd_control = htole32(control);
	STE_CDTXSYNC(sc, id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Re-enable the transmitter, halt the Tx DMA engine, then point
	 * it at the descriptor to restart from and resume.  The order
	 * of these register writes is deliberate.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_TxDMAListPtr, 0);
	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_MacCtrl1, MC1_TxEnable);
	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_DMACtrl, DC_TxDMAHalt);
	ste_dmahalt_wait(sc);
	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_TxDMAListPtr,
	    STE_CDTXADDR(sc, id));
	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_DMACtrl, DC_TxDMAResume);
}

/*
 *
ste_init: [ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
ste_init(struct ifnet *ifp)
{
	struct ste_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ste_descsoft *ds;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	ste_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	ste_reset(sc, AC_GlobalReset | AC_RxReset | AC_TxReset | AC_DMA |
	    AC_FIFO | AC_Network | AC_Host | AC_AutoInit | AC_RstOut);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = STE_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < STE_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = ste_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				ste_rxdrain(sc);
				goto out;
			}
		} else
			STE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Set the station address.  Each byte of the MAC address goes
	 * into its own consecutive StationAddress register, so the
	 * register offset must advance with the byte index.  (This
	 * previously wrote every byte to STE_StationAddress0 + 1,
	 * clobbering a single register ETHER_ADDR_LEN times.)
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(st, sh, STE_StationAddress0 + i,
		    LLADDR(ifp->if_sadl)[i]);

	/* Set up the receive filter. */
	ste_set_filter(sc);

	/*
	 * Give the receive ring to the chip.
	 */
	bus_space_write_4(st, sh, STE_RxDMAListPtr,
	    STE_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * We defer giving the transmit ring to the chip until we
	 * transmit the first packet.
	 */

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (127 is the max) -- we explicitly kick the transmit engine
	 * when there's actually a packet.  We are using auto-polling only
	 * to make the interface to the transmit engine not suck.
	 */
	bus_space_write_1(st, sh, STE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	bus_space_write_1(st, sh, STE_RxDMAPollPeriod, 64);

	/* Initialize the Tx start threshold. */
	ste_setthresh(sc);

	/* Set the FIFO release threshold to 512 bytes. */
	bus_space_write_1(st, sh, STE_TxReleaseThresh, 512 >> 4);

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IE_HostError | IE_TxComplete | IE_UpdateStats |
	    IE_TxDMAComplete | IE_RxDMAComplete;

	/* Ack any stale interrupts, then enable the ones we want. */
	bus_space_write_2(st, sh, STE_IntStatus, 0xffff);
	bus_space_write_2(st, sh, STE_IntEnable, sc->sc_IntEnable);

	/*
	 * Start the receive DMA engine.
	 */
	bus_space_write_4(st, sh, STE_DMACtrl, sc->sc_DMACtrl | DC_RxDMAResume);

	/*
	 * Initialize MacCtrl0 -- do it before setting the media,
	 * as setting the media will actually program the register.
	 */
	sc->sc_MacCtrl0 = MC0_IFSSelect(0);
	if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
		sc->sc_MacCtrl0 |= MC0_RcvLargeFrames;

	/*
	 * Set the current media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Start the MAC.
	 */
	bus_space_write_2(st, sh, STE_MacCtrl1,
	    MC1_StatisticsEnable | MC1_TxEnable | MC1_RxEnable);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, ste_tick, sc);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * ste_rxdrain:
 *
 *	Drain the receive queue.
 */
void
ste_rxdrain(struct ste_softc *sc)
{
	struct ste_descsoft *ds;
	int i;

	/* Unload and free every receive buffer still on the ring. */
	for (i = 0; i < STE_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * ste_stop: [ ifnet interface function ]
 *
 *	Stop transmission on the interface.
 */
void
ste_stop(struct ifnet *ifp, int disable)
{
	struct ste_softc *sc = ifp->if_softc;
	struct ste_descsoft *ds;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/*
	 * Disable interrupts.
	 */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_MacCtrl1,
	    MC1_StatisticsDisable | MC1_TxDisable | MC1_RxDisable);

	/*
	 * Stop the transmit and receive DMA.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_DMACtrl,
	    DC_RxDMAHalt | DC_TxDMAHalt);
	ste_dmahalt_wait(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < STE_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/* Optionally free the receive buffers as well. */
	if (disable)
		ste_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * ste_eeprom_wait:
 *
 *	Wait for a pending EEPROM operation to finish.
 *	Returns 0 when the EEPROM is ready, 1 on timeout.
 */
static int
ste_eeprom_wait(struct ste_softc *sc)
{
	int i;

	/* Poll (1ms steps) until the EEPROM busy bit clears. */
	for (i = 0; i < STE_TIMEOUT; i++) {
		delay(1000);
		if ((bus_space_read_2(sc->sc_st, sc->sc_sh, STE_EepromCtrl) &
		    EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * ste_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
void
ste_read_eeprom(struct ste_softc *sc, int offset, uint16_t *data)
{

	if (ste_eeprom_wait(sc))
		printf("%s: EEPROM failed to come ready\n",
		    sc->sc_dev.dv_xname);

	/* Issue the read opcode for the given word offset, then wait. */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_R));
	if (ste_eeprom_wait(sc))
		printf("%s: EEPROM read timed out\n",
		    sc->sc_dev.dv_xname);
	/* On timeout the value read here may be garbage. */
	*data = bus_space_read_2(sc->sc_st, sc->sc_sh, STE_EepromData);
}

/*
 * ste_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
ste_add_rxbuf(struct ste_softc *sc, int idx)
{
	struct ste_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate a header mbuf and attach a cluster to it. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload the previous buffer's DMA map before reusing it. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("ste_add_rxbuf");	/* XXX */
	}

	/* Sync the map for device reads before handing it to the chip. */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	STE_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * ste_set_filter:
 *
 *	Set up the receive filter.
 */
void
ste_set_filter(struct ste_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint16_t mchash[4];

	sc->sc_ReceiveMode = RM_ReceiveUnicast;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_ReceiveMode |= RM_ReceiveBroadcast;

	/* Promiscuous mode implies receiving all multicasts, too. */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_ReceiveMode |= RM_ReceiveAllFrames;
		goto allmulti;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */

	memset(mchash, 0, sizeof(mchash));

	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;

	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	sc->sc_ReceiveMode |= RM_ReceiveMulticastHash;

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto done;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_ReceiveMode |= RM_ReceiveMulticast;

 done:
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		/*
		 * Program the multicast hash table.
		 */
		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable0,
		    mchash[0]);
		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable1,
		    mchash[1]);
		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable2,
		    mchash[2]);
		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable3,
		    mchash[3]);
	}

	bus_space_write_1(sc->sc_st, sc->sc_sh, STE_ReceiveMode,
	    sc->sc_ReceiveMode);
}

/*
 * ste_mii_readreg: [mii interface function]
 *
 *	Read a PHY register on the MII of the ST-201.
1623 */ 1624 int 1625 ste_mii_readreg(struct device *self, int phy, int reg) 1626 { 1627 1628 return (mii_bitbang_readreg(self, &ste_mii_bitbang_ops, phy, reg)); 1629 } 1630 1631 /* 1632 * ste_mii_writereg: [mii interface function] 1633 * 1634 * Write a PHY register on the MII of the ST-201. 1635 */ 1636 void 1637 ste_mii_writereg(struct device *self, int phy, int reg, int val) 1638 { 1639 1640 mii_bitbang_writereg(self, &ste_mii_bitbang_ops, phy, reg, val); 1641 } 1642 1643 /* 1644 * ste_mii_statchg: [mii interface function] 1645 * 1646 * Callback from MII layer when media changes. 1647 */ 1648 void 1649 ste_mii_statchg(struct device *self) 1650 { 1651 struct ste_softc *sc = (struct ste_softc *) self; 1652 1653 if (sc->sc_mii.mii_media_active & IFM_FDX) 1654 sc->sc_MacCtrl0 |= MC0_FullDuplexEnable; 1655 else 1656 sc->sc_MacCtrl0 &= ~MC0_FullDuplexEnable; 1657 1658 /* XXX 802.1x flow-control? */ 1659 1660 bus_space_write_2(sc->sc_st, sc->sc_sh, STE_MacCtrl0, sc->sc_MacCtrl0); 1661 } 1662 1663 /* 1664 * ste_mii_bitbang_read: [mii bit-bang interface function] 1665 * 1666 * Read the MII serial port for the MII bit-bang module. 1667 */ 1668 uint32_t 1669 ste_mii_bitbang_read(struct device *self) 1670 { 1671 struct ste_softc *sc = (void *) self; 1672 1673 return (bus_space_read_1(sc->sc_st, sc->sc_sh, STE_PhyCtrl)); 1674 } 1675 1676 /* 1677 * ste_mii_bitbang_write: [mii big-bang interface function] 1678 * 1679 * Write the MII serial port for the MII bit-bang module. 1680 */ 1681 void 1682 ste_mii_bitbang_write(struct device *self, uint32_t val) 1683 { 1684 struct ste_softc *sc = (void *) self; 1685 1686 bus_space_write_1(sc->sc_st, sc->sc_sh, STE_PhyCtrl, val); 1687 } 1688 1689 /* 1690 * ste_mediastatus: [ifmedia interface function] 1691 * 1692 * Get the current interface media status. 
1693 */ 1694 void 1695 ste_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1696 { 1697 struct ste_softc *sc = ifp->if_softc; 1698 1699 mii_pollstat(&sc->sc_mii); 1700 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1701 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1702 } 1703 1704 /* 1705 * ste_mediachange: [ifmedia interface function] 1706 * 1707 * Set hardware to newly-selected media. 1708 */ 1709 int 1710 ste_mediachange(struct ifnet *ifp) 1711 { 1712 struct ste_softc *sc = ifp->if_softc; 1713 1714 if (ifp->if_flags & IFF_UP) 1715 mii_mediachg(&sc->sc_mii); 1716 return (0); 1717 } 1718