/*	$OpenBSD: if_cas.c,v 1.33 2013/08/21 05:21:44 dlg Exp $	*/

/*
 *
 * Copyright (C) 2007 Mark Kettenis.
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun Cassini ethernet controllers.
 *
 * There are basically two variants of this chip: Cassini and
 * Cassini+.  We can distinguish between the two by revision: 0x10 and
 * up are Cassini+.  The most important difference is that Cassini+
 * has a second RX descriptor ring.  Cassini+ will not work without
 * configuring that second ring.  However, since we don't use it we
 * don't actually fill the descriptors, and only hand off the first
 * four to the chip.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/if_casreg.h>
#include <dev/pci/if_casvar.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#endif

#define	TRIES	10000

struct cfdriver cas_cd = {
	NULL, "cas", DV_IFNET
};

int	cas_match(struct device *, void *, void *);
void	cas_attach(struct device *, struct device *, void *);
int	cas_pci_enaddr(struct cas_softc *, struct pci_attach_args *);

struct cfattach cas_ca = {
	sizeof(struct cas_softc), cas_match, cas_attach
};

void	cas_config(struct cas_softc *);
void	cas_start(struct ifnet *);
void	cas_stop(struct ifnet *, int);
int	cas_ioctl(struct ifnet *, u_long, caddr_t);
void	cas_tick(void *);
void	cas_watchdog(struct ifnet *);
int	cas_init(struct ifnet *);
void	cas_init_regs(struct cas_softc *);
int	cas_ringsize(int);
int	cas_cringsize(int);
int	cas_meminit(struct cas_softc *);
void	cas_mifinit(struct cas_softc *);
int	cas_bitwait(struct cas_softc *, bus_space_handle_t, int,
	    u_int32_t, u_int32_t);
void	cas_reset(struct cas_softc *);
int	cas_reset_rx(struct cas_softc *);
int	cas_reset_tx(struct cas_softc *);
int	cas_disable_rx(struct cas_softc *);
int	cas_disable_tx(struct cas_softc *);
void	cas_rxdrain(struct cas_softc *);
int	cas_add_rxbuf(struct cas_softc *, int idx);
void	cas_iff(struct cas_softc *);
int	cas_encap(struct cas_softc *, struct mbuf *, u_int32_t *);

/* MII methods & callbacks */
int	cas_mii_readreg(struct device *, int, int);
void	cas_mii_writereg(struct device *, int, int, int);
void	cas_mii_statchg(struct device *);
int	cas_pcs_readreg(struct device *, int, int);
void	cas_pcs_writereg(struct device *, int, int, int);

int	cas_mediachange(struct ifnet *);
void	cas_mediastatus(struct ifnet *, struct ifmediareq *);

int	cas_eint(struct cas_softc *, u_int);
int	cas_rint(struct cas_softc *);
int	cas_tint(struct cas_softc *, u_int32_t);
int	cas_pint(struct cas_softc *);
int	cas_intr(void *);

#ifdef CAS_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

const struct pci_matchid cas_pci_devices[] = {
	{ PCI_VENDOR_SUN, PCI_PRODUCT_SUN_CASSINI },
	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_SATURN }
};

int
cas_match(struct device *parent, void *cf, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, cas_pci_devices,
	    nitems(cas_pci_devices)));
}

#define	PROMHDR_PTR_DATA	0x18
#define	PROMDATA_PTR_VPD	0x08
#define	PROMDATA_DATA2		0x0a

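/*
 * Expansion ROM header signature, PCI data structure signature and
 * class-code bytes that cas_pci_enaddr() matches before it walks the
 * VPD in search of the "local-mac-address" property.
 */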
static const u_int8_t cas_promhdr[] = { 0x55, 0xaa };
static const u_int8_t cas_promdat[] = {
	'P', 'C', 'I', 'R',
	PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8,
	PCI_PRODUCT_SUN_CASSINI & 0xff, PCI_PRODUCT_SUN_CASSINI >> 8
};

static const u_int8_t cas_promdat2[] = {
	0x18, 0x00,			/* structure length */
	0x00,				/* structure revision */
	0x00,				/* interface revision */
	PCI_SUBCLASS_NETWORK_ETHERNET,	/* subclass code */
	PCI_CLASS_NETWORK		/* class code */
};

int
cas_pci_enaddr(struct cas_softc *sc, struct pci_attach_args *pa)
{
	struct pci_vpd_largeres *res;
	struct pci_vpd *vpd;
	bus_space_handle_t romh;
	bus_space_tag_t romt;
	bus_size_t romsize = 0;
	u_int8_t buf[32], *desc;
	pcireg_t address;
	int dataoff, vpdoff, len;
	int rv = -1;

	if (pci_mapreg_map(pa, PCI_ROM_REG, PCI_MAPREG_TYPE_MEM, 0,
	    &romt, &romh, 0, &romsize, 0))
		return (-1);

	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	address |= PCI_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);

	bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf));
	if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr)))
		goto fail;

	dataoff = buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
	if (dataoff < 0x1c)
		goto fail;

	bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
	if (bcmp(buf, cas_promdat, sizeof(cas_promdat)) ||
	    bcmp(buf + PROMDATA_DATA2, cas_promdat2, sizeof(cas_promdat2)))
		goto fail;

	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
	if (vpdoff < 0x1c)
		goto fail;

 next:
	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
	if (!PCI_VPDRES_ISLARGE(buf[0]))
		goto fail;

	res = (struct pci_vpd_largeres *)buf;
	vpdoff += sizeof(*res);

	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
	switch (PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
		/* Skip identifier string. */
		vpdoff += len;
		goto next;

	case PCI_VPDRES_TYPE_VPD:
		while (len > 0) {
			bus_space_read_region_1(romt, romh, vpdoff,
			    buf, sizeof(buf));

			vpd = (struct pci_vpd *)buf;
			vpdoff += sizeof(*vpd) + vpd->vpd_len;
			len -= sizeof(*vpd) + vpd->vpd_len;

			/*
			 * We're looking for an "Enhanced" VPD...
			 */
			if (vpd->vpd_key0 != 'Z')
				continue;

			desc = buf + sizeof(*vpd);

			/*
			 * ...which is an instance property...
			 */
			if (desc[0] != 'I')
				continue;
			desc += 3;

			/*
			 * ...that's a byte array with the proper
			 * length for a MAC address...
			 */
			if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN)
				continue;
			desc += 2;

			/*
			 * ...named "local-mac-address".
			 */
			if (strcmp(desc, "local-mac-address") != 0)
				continue;
			desc += strlen("local-mac-address") + 1;

			bcopy(desc, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
			rv = 0;
		}
		break;

	default:
		goto fail;
	}

 fail:
	if (romsize != 0)
		bus_space_unmap(romt, romh, romsize);

	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	address &= ~PCI_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);

	return (rv);
}

void
cas_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct cas_softc *sc = (void *)self;
	pci_intr_handle_t ih;
#ifdef __sparc64__
	/* XXX the following declarations should be elsewhere */
	extern void myetheraddr(u_char *);
#endif
	const char *intrstr = NULL;
	bus_size_t size;
	int gotenaddr = 0;

	sc->sc_rev = PCI_REVISION(pa->pa_class);
	sc->sc_dmatag = pa->pa_dmat;

#define PCI_CAS_BASEADDR	0x10
	if (pci_mapreg_map(pa, PCI_CAS_BASEADDR, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &size, 0) != 0) {
		printf(": can't map registers\n");
		return;
	}

	if (cas_pci_enaddr(sc, pa) == 0)
		gotenaddr = 1;

#ifdef __sparc64__
	if (!gotenaddr) {
		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
		    sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN) <= 0)
			myetheraddr(sc->sc_arpcom.ac_enaddr);
		gotenaddr = 1;
	}
#endif
#ifdef __powerpc__
	if (!gotenaddr) {
		pci_ether_hw_addr(pa->pa_pc, sc->sc_arpcom.ac_enaddr);
		gotenaddr = 1;
	}
#endif

	sc->sc_burst = 16;	/* XXX */

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		bus_space_unmap(sc->sc_memt, sc->sc_memh, size);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc,
	    ih, IPL_NET, cas_intr, sc, self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		bus_space_unmap(sc->sc_memt, sc->sc_memh, size);
		return;
	}

	printf(": %s", intrstr);

	/*
	 * call the main configure
	 */
	cas_config(sc);
}

/*
 * cas_config:
 *
 *	Attach a Cassini interface to the system.
 */
void
cas_config(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	cas_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, BUS_DMA_ZERO)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct cas_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct cas_control_data), 1,
	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < CAS_NRXDESC; i++) {
		bus_dma_segment_t seg;
		caddr_t kva;
		int rseg;

		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
			printf("\n%s: unable to alloc rx DMA mem %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_dmaseg = seg;

		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
		    CAS_PAGE_SIZE, &kva, BUS_DMA_NOWAIT)) != 0) {
			printf("\n%s: unable to map rx DMA mem %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_kva = kva;

		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}

		if ((error = bus_dmamap_load(sc->sc_dmatag,
		    sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
		    BUS_DMA_NOWAIT)) != 0) {
			printf("\n%s: unable to load rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 16 * 1024;

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = cas_start;
	ifp->if_ioctl = cas_ioctl;
	ifp->if_watchdog = cas_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, CAS_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = cas_mii_readreg;
	mii->mii_writereg = cas_mii_writereg;
	mii->mii_statchg = cas_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);

	cas_mifinit(sc);

	if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MIF_CONFIG, sc->sc_mif_config);
	}

	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL &&
	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0|CAS_MIF_CONFIG_MDI1)) {
		/*
		 * Try the external PCS SERDES if we didn't find any
		 * MII devices.
		 */
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);

		mii->mii_readreg = cas_pcs_readreg;
		mii->mii_writereg = cas_pcs_writereg;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping.  We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, cas_tick, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < CAS_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < CAS_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct cas_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}


void
cas_tick(void *arg)
{
	struct cas_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mac = sc->sc_memh;
	int s;
	u_int32_t v;

	/* unload collisions counters */
	v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v +
	    bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT);
	ifp->if_oerrors += v;

	/* read error counters */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, CAS_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0);

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

int
cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r,
    u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_memt, h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}

	return (0);
}

void
cas_reset(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: cas_reset\n", sc->sc_dev.dv_xname));
	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, CAS_RESET,
	    CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS);
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
	splx(s);
}


/*
 * cas_rxdrain:
 *
 *	Drain the receive queue.
 */
void
cas_rxdrain(struct cas_softc *sc)
{
	/* Nothing to do yet. */
}

/*
 * Reset the whole thing.
 */
void
cas_stop(struct ifnet *ifp, int disable)
{
	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
	struct cas_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: cas_stop\n", sc->sc_dev.dv_xname));

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	mii_down(&sc->sc_mii);

	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	if (disable)
		cas_rxdrain(sc);
}


/*
 * Reset the receiver
 */
int
cas_reset_rx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	cas_disable_rx(sc);
	bus_space_write_4(t, h, CAS_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RX_CONFIG, 1, 0))
		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX, 0)) {
		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}


/*
 * Reset the transmitter
 */
int
cas_reset_tx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	cas_disable_tx(sc);
	bus_space_write_4(t, h, CAS_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_TX_CONFIG, 1, 0))
		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_TX, 0)) {
		printf("%s: cannot reset transmitter\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
int
cas_disable_rx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	cfg &= ~CAS_MAC_RX_ENABLE;
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG, CAS_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
int
cas_disable_tx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG);
	cfg &= ~CAS_MAC_TX_ENABLE;
	bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG, CAS_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
int
cas_meminit(struct cas_softc *sc)
{
	int i;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		sc->sc_txdescs[i].cd_flags = 0;
		sc->sc_txdescs[i].cd_addr = 0;
	}
	CAS_CDTXSYNC(sc, 0, CAS_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < CAS_NRXDESC; i++)
		CAS_INIT_RXDESC(sc, i, i);
	sc->sc_rxdptr = 0;
	sc->sc_rxptr = 0;

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < CAS_NRXCOMP; i++) {
		sc->sc_rxcomps[i].cc_word[0] = 0;
		sc->sc_rxcomps[i].cc_word[1] = 0;
		sc->sc_rxcomps[i].cc_word[2] = 0;
		sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN);
		CAS_CDRXCSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	return (0);
}

int
cas_ringsize(int sz)
{
	switch (sz) {
	case 32:
		return CAS_RING_SZ_32;
	case 64:
		return CAS_RING_SZ_64;
	case 128:
		return CAS_RING_SZ_128;
	case 256:
		return CAS_RING_SZ_256;
	case 512:
		return CAS_RING_SZ_512;
	case 1024:
		return CAS_RING_SZ_1024;
	case 2048:
		return CAS_RING_SZ_2048;
	case 4096:
		return CAS_RING_SZ_4096;
	case 8192:
		return CAS_RING_SZ_8192;
	default:
		printf("cas: invalid Receive Descriptor ring size %d\n", sz);
		return CAS_RING_SZ_32;
	}
}

int
cas_cringsize(int sz)
{
	int i;

	for (i = 0; i < 9; i++)
		if (sz == (128 << i))
			return i;

	printf("cas: invalid completion ring size %d\n", sz);
	return 128;
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
cas_init(struct ifnet *ifp)
{
	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	int s;
	u_int max_frame_size;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: cas_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel */
	cas_stop(ifp, 0);
	cas_reset(sc);
	DPRINTF(sc, ("%s: cas_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	cas_mifinit(sc);

	/* step 3.  Setup data structures in host memory */
	cas_meminit(sc);

	/* step 4.  TX MAC registers & counters */
	cas_init_regs(sc);
	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
	bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);

	/* step 5.  RX MAC registers & counters */
	cas_iff(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses */
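	/*
	 * The ring bases are written as separate high and low 32-bit
	 * halves; the KASSERTs below verify that each base address is
	 * 8 KB aligned (low 13 bits clear).
	 */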
	KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,
	    (((uint64_t)CAS_CDTXADDR(sc, 0)) >> 32));
	bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0));

	KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,
	    (((uint64_t)CAS_CDRXADDR(sc, 0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0));

	KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,
	    (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0));

	if (CAS_PLUS(sc)) {
		KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0);
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,
		    (((uint64_t)CAS_CDRXADDR2(sc, 0)) >> 32));
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,
		    CAS_CDRXADDR2(sc, 0));
	}

	/* step 8.  Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, CAS_INTMASK,
	    ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY|
	    CAS_INTR_TX_TAG_ERR|
	    CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF|
	    CAS_INTR_RX_TAG_ERR|
	    CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS|
	    CAS_INTR_MAC_CONTROL|CAS_INTR_MIF|
	    CAS_INTR_BERR));
	bus_space_write_4(t, h, CAS_MAC_RX_MASK,
	    CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE);
	bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9.  ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10;
	bus_space_write_4(t, h, CAS_TX_CONFIG,
	    v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29));
	bus_space_write_4(t, h, CAS_TX_KICK, 0);

	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT;
	if (CAS_PLUS(sc))
		v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT;

	/* Encode Receive Completion ring size */
	v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT;

	/* Enable DMA */
	bus_space_write_4(t, h, CAS_RX_CONFIG,
	    v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6);

	/* step 11.  Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12.  RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command */

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4);
	if (CAS_PLUS(sc))
		bus_space_write_4(t, h, CAS_RX_KICK2, 4);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);

	return (0);
}

void
cas_init_regs(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t v, r;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {
		/* Load recommended values */
		bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00);
		bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08);
		bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04);

		bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */;
		bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);

		bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07);
		bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04);
		bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10);
		bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);

		/* Secondary MAC addresses set to 0:0:0:0:0:0 */
		for (r = CAS_MAC_ADDR3; r < CAS_MAC_ADDR42; r += 4)
			bus_space_write_4(t, h, r, 0);

		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001);
		bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200);
		bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 0);

		/* Hash table initialized to 0 */
		for (r = CAS_MAC_HASH0; r <= CAS_MAC_HASH15; r += 4)
			bus_space_write_4(t, h, r, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, CAS_MAC_ADDR0,
	    (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, CAS_MAC_ADDR1,
	    (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, CAS_MAC_ADDR2,
	    (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
}

/*
 * Receive interrupt.
 */
int
cas_rint(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	struct cas_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t word[4];
	int len, off, idx;
	int i, skip;
	caddr_t cp;

	for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) {
		CAS_CDRXCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]);
		word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]);
		word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]);
		word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]);

		/* Stop if the hardware still owns the descriptor. */
		if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN)
			break;

		len = CAS_RC1_HDR_LEN(word[1]);
		if (len > 0) {
			off = CAS_RC1_HDR_OFF(word[1]);
			idx = CAS_RC1_HDR_IDX(word[1]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN;
			m = m_devget(cp, len, ETHER_ALIGN, ifp);

			if (word[0] & CAS_RC0_RELEASE_HDR)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {

#if NBPFILTER > 0
				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if it's for us.
				 */
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

				ifp->if_ipackets++;
				ether_input_mbuf(ifp, m);
			} else
				ifp->if_ierrors++;
		}

		len = CAS_RC0_DATA_LEN(word[0]);
		if (len > 0) {
			off = CAS_RC0_DATA_OFF(word[0]);
			idx = CAS_RC0_DATA_IDX(word[0]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/* XXX We should not be copying the packet here. */
			cp = rxs->rxs_kva + off + ETHER_ALIGN;
			m = m_devget(cp, len, ETHER_ALIGN, ifp);

			if (word[0] & CAS_RC0_RELEASE_DATA)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {
#if NBPFILTER > 0
				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if it's for us.
				 */
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

				ifp->if_ipackets++;
				ether_input_mbuf(ifp, m);
			} else
				ifp->if_ierrors++;
		}

		if (word[0] & CAS_RC0_SPLIT)
			printf("split packet\n");

		skip = CAS_RC0_SKIP(word[0]);
	}

	while (sc->sc_rxptr != i) {
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] =
		    CAS_DMA_WRITE(CAS_RC3_OWN);
		CAS_CDRXCSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr);
	}

	bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr);

	DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION)));

	return (1);
}

/*
 * cas_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
cas_add_rxbuf(struct cas_softc *sc, int idx)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx);

	/* Only kick the hardware once every four descriptors. */
	if ((sc->sc_rxdptr % 4) == 0)
		bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr);

	if (++sc->sc_rxdptr == CAS_NRXDESC)
		sc->sc_rxdptr = 0;

	return (0);
}

int
cas_eint(struct cas_softc *sc, u_int status)
{
	if ((status & CAS_INTR_MIF) != 0) {
#ifdef CAS_DEBUG
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
		return (1);
	}

	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, CAS_INTR_BITS);
	return (1);
}

int
cas_pint(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t seb = sc->sc_memh;
	u_int32_t status;

	status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
	status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
#ifdef CAS_DEBUG
	if (status)
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
	return (1);
}

int
cas_intr(void *v)
{
	struct cas_softc *sc = (struct cas_softc *)v;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t seb = sc->sc_memh;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, CAS_STATUS);
	DPRINTF(sc, ("%s: cas_intr: cplt %x status %b\n",
	    sc->sc_dev.dv_xname, (status >> 19), status, CAS_INTR_BITS));

	if ((status & CAS_INTR_PCS) != 0)
		r |= cas_pint(sc);

	if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_COMP_FULL | CAS_INTR_BERR)) != 0)
		r |= cas_eint(sc, status);

	if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0)
		r |= cas_tint(sc, status);

	if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0)
		r |= cas_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & CAS_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS);
#ifdef CAS_DEBUG
		if (txstat & ~CAS_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    sc->sc_dev.dv_xname, txstat);
#endif
		if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG))
			cas_init(ifp);
	}
	if (status & CAS_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS);
#ifdef CAS_DEBUG
		if (rxstat & ~CAS_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
		/*
		 * On some chip revisions CAS_MAC_RX_OVERFLOW happens often
		 * due to a silicon bug, so handle it silently.
		 */
		if (rxstat & CAS_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;
			cas_init(ifp);
		}
#ifdef CAS_DEBUG
		else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
	}
	return (r);
}


void
cas_watchdog(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x "
	    "CAS_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG),
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	cas_init(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
cas_mifinit(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG);
	sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The Cassini MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
int
cas_mii_readreg(struct device *self, int phy, int reg)
{
	struct cas_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;
	int n;
	u_int32_t v;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << CAS_MIF_REG_SHIFT) | (phy << CAS_MIF_PHY_SHIFT) |
	    CAS_MIF_FRAME_READ;

	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
		if (v & CAS_MIF_FRAME_TA0)
			return (v & CAS_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
cas_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct cas_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;
	int n;
	u_int32_t v;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_mii_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	/* Construct the frame command */
	v = CAS_MIF_FRAME_WRITE |
	    (phy << CAS_MIF_PHY_SHIFT) |
	    (reg << CAS_MIF_REG_SHIFT) |
	    (val & CAS_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
		if (v & CAS_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
cas_mii_statchg(struct device *dev)
{
	struct cas_softc *sc = (void *)dev;
#ifdef CAS_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mac = sc->sc_memh;
	u_int32_t v;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = CAS_MAC_TX_ENA_IPG0|CAS_MAC_TX_NGU|CAS_MAC_TX_NGU_LIMIT|
	    CAS_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= CAS_MAC_TX_IGN_CARRIER|CAS_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = CAS_MAC_XIF_TX_MII_ENA;
	v |= CAS_MAC_XIF_LINK_LED;

	/* MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= CAS_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= CAS_MAC_XIF_ECHO_DISABL;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= CAS_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~CAS_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v);
}

int
cas_pcs_readreg(struct device *self, int phy, int reg)
{
	struct cas_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t pcs = sc->sc_memh;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg);
#endif

	if (phy != CAS_PHYAD_EXTERNAL)
		return (0);

	switch (reg) {
	case MII_BMCR:
		reg = CAS_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = CAS_MII_STATUS;
		break;
	case MII_ANAR:
		reg = CAS_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = CAS_MII_ANLPAR;
		break;
	case MII_EXTSR:
		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
	default:
		return (0);
	}

	return bus_space_read_4(t, pcs, reg);
}

void
cas_pcs_writereg(struct device *self, int phy, int reg, int val)
{
	struct cas_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t pcs = sc->sc_memh;
	int reset = 0;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_pcs_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	if (phy != CAS_PHYAD_EXTERNAL)
		return;

	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0);

	switch (reg) {
	case MII_BMCR:
		reset = (val & CAS_MII_CONTROL_RESET);
		reg = CAS_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = CAS_MII_STATUS;
		break;
	case MII_ANAR:
		reg = CAS_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = CAS_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	if (reset)
		cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0);

	if (reg == CAS_MII_ANAR || reset)
		bus_space_write_4(t, pcs, CAS_MII_CONFIG,
		    CAS_MII_CONFIG_ENABLE);
}

int
cas_mediachange(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}

	return (mii_mediachg(&sc->sc_mii));
}

void
cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cas_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cas_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			cas_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				cas_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				cas_stop(ifp, 1);
		}
#ifdef CAS_DEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			cas_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
cas_iff(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t crc, hash[16], rxcfg;
	int i;

	rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS |
	    CAS_MAC_RX_PROMISC_GRP);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= CAS_MAC_RX_PROMISCUOUS;
		else
			rxcfg |= CAS_MAC_RX_PROMISC_GRP;
	} else {
		/*
		 * Set up multicast address filter by passing all multicast
		 * addresses through a crc generator, and then using the
		 * high order 8 bits as an index into the 256 bit logical
		 * address filter.  The high order 4 bits select the word,
		 * while the other 4 bits select the bit within the word
		 * (where bit 0 is the MSB).
		 */

		rxcfg |= CAS_MAC_RX_HASH_FILTER;

		/* Clear hash table */
		for (i = 0; i < 16; i++)
			hash[i] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/* Just want the 8 most significant bits. */
			crc >>= 24;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (15 - (crc & 15));

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Now load the hash table into the chip (if we are using it) */
		for (i = 0; i < 16; i++) {
			bus_space_write_4(t, h,
			    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
			    hash[i]);
		}
	}

	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg);
}

int
cas_encap(struct cas_softc *sc, struct mbuf *mhead, u_int32_t *bixp)
{
	u_int64_t flags;
	u_int32_t cur, frag, i;
	bus_dmamap_t map;

	cur = frag = *bixp;
	map = sc->sc_txd[cur].sd_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
	    BUS_DMA_NOWAIT) != 0) {
		return (ENOBUFS);
	}

	if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) {
		bus_dmamap_unload(sc->sc_dmatag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Build one descriptor per DMA segment, marking the first with
	 * START_OF_PACKET and the last with END_OF_PACKET.
	 */
	for (i = 0; i < map->dm_nsegs; i++) {
		sc->sc_txdescs[frag].cd_addr =
		    CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
		flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
		    (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
		    ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
		sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
		    CAS_CDTXOFF(frag), sizeof(struct cas_desc),
		    BUS_DMASYNC_PREWRITE);
		cur = frag;
		if (++frag == CAS_NTXDESC)
			frag = 0;
	}

	sc->sc_tx_cnt += map->dm_nsegs;
	sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map;
	sc->sc_txd[cur].sd_map = map;
	sc->sc_txd[cur].sd_mbuf = mhead;

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);

	*bixp = frag;

	/* sync descriptors */

	return (0);
}

/*
 * Transmit interrupt.
 */
int
cas_tint(struct cas_softc *sc, u_int32_t status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct cas_sxd *sd;
	u_int32_t cons, comp;

	comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION);
	cons = sc->sc_tx_cons;
	while (cons != comp) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			ifp->if_opackets++;
		}
		sc->sc_tx_cnt--;
		if (++cons == CAS_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	if (sc->sc_tx_cnt < CAS_NTXDESC - 2)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	cas_start(ifp);

	return (1);
}

void
cas_start(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int32_t bix;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */
		if (cas_encap(sc, m, &bix)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		ifp->if_timer = 5;
	}

	sc->sc_tx_prod = bix;
}