1 /* $OpenBSD: hme.c,v 1.63 2013/08/07 01:06:29 bluhm Exp $ */ 2 /* $NetBSD: hme.c,v 1.21 2001/07/07 15:59:37 thorpej Exp $ */ 3 4 /*- 5 * Copyright (c) 1999 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Paul Kranenburg. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * HME Ethernet module driver. 35 */ 36 37 #include "bpfilter.h" 38 #include "vlan.h" 39 40 #undef HMEDEBUG 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/mbuf.h> 46 #include <sys/syslog.h> 47 #include <sys/socket.h> 48 #include <sys/device.h> 49 #include <sys/malloc.h> 50 #include <sys/ioctl.h> 51 #include <sys/errno.h> 52 53 #include <net/if.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 57 #ifdef INET 58 #include <netinet/in.h> 59 #include <netinet/in_systm.h> 60 #include <netinet/ip.h> 61 #include <netinet/if_ether.h> 62 #include <netinet/tcp.h> 63 #include <netinet/udp.h> 64 #endif 65 66 #if NBPFILTER > 0 67 #include <net/bpf.h> 68 #endif 69 70 #include <dev/mii/mii.h> 71 #include <dev/mii/miivar.h> 72 73 #include <machine/bus.h> 74 75 #include <dev/ic/hmereg.h> 76 #include <dev/ic/hmevar.h> 77 78 struct cfdriver hme_cd = { 79 NULL, "hme", DV_IFNET 80 }; 81 82 #define HME_RX_OFFSET 2 83 84 void hme_start(struct ifnet *); 85 void hme_stop(struct hme_softc *, int); 86 int hme_ioctl(struct ifnet *, u_long, caddr_t); 87 void hme_tick(void *); 88 void hme_watchdog(struct ifnet *); 89 void hme_init(struct hme_softc *); 90 void hme_meminit(struct hme_softc *); 91 void hme_mifinit(struct hme_softc *); 92 void hme_reset(struct hme_softc *); 93 void hme_iff(struct hme_softc *); 94 void hme_fill_rx_ring(struct hme_softc *); 95 int hme_newbuf(struct hme_softc *, struct hme_sxd *); 96 97 /* MII methods & callbacks */ 98 static int hme_mii_readreg(struct device *, int, int); 99 static void hme_mii_writereg(struct device *, int, int, int); 100 static void hme_mii_statchg(struct device *); 101 102 int hme_mediachange(struct ifnet *); 103 void hme_mediastatus(struct ifnet *, struct ifmediareq *); 104 105 int hme_eint(struct hme_softc *, u_int); 106 int hme_rint(struct hme_softc *); 107 int hme_tint(struct hme_softc *); 108 /* TCP/UDP checksum offload support */ 109 void hme_rxcksum(struct mbuf *, u_int32_t); 110 111 void 112 hme_config(sc) 113 struct hme_softc *sc; 114 { 115 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 116 struct mii_data *mii = &sc->sc_mii; 117 struct mii_softc *child; 118 bus_dma_tag_t dmatag = sc->sc_dmatag; 119 bus_dma_segment_t seg; 120 bus_size_t size; 121 int rseg, error, i; 122 123 /* 124 * HME common initialization. 125 * 126 * hme_softc fields that must be initialized by the front-end: 127 * 128 * the bus tag: 129 * sc_bustag 130 * 131 * the dma bus tag: 132 * sc_dmatag 133 * 134 * the bus handles: 135 * sc_seb (Shared Ethernet Block registers) 136 * sc_erx (Receiver Unit registers) 137 * sc_etx (Transmitter Unit registers) 138 * sc_mac (MAC registers) 139 * sc_mif (Management Interface registers) 140 * 141 * the maximum bus burst size: 142 * sc_burst 143 * 144 * the local Ethernet address: 145 * sc_arpcom.ac_enaddr 146 * 147 */ 148 149 /* Make sure the chip is stopped. */ 150 hme_stop(sc, 0); 151 152 for (i = 0; i < HME_TX_RING_SIZE; i++) { 153 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, HME_TX_NSEGS, 154 MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 155 &sc->sc_txd[i].sd_map) != 0) { 156 sc->sc_txd[i].sd_map = NULL; 157 goto fail; 158 } 159 } 160 for (i = 0; i < HME_RX_RING_SIZE; i++) { 161 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, 162 MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 163 &sc->sc_rxd[i].sd_map) != 0) { 164 sc->sc_rxd[i].sd_map = NULL; 165 goto fail; 166 } 167 } 168 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0, 169 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) { 170 sc->sc_rxmap_spare = NULL; 171 goto fail; 172 } 173 174 /* 175 * Allocate DMA capable memory 176 * Buffer descriptors must be aligned on a 2048 byte boundary; 177 * take this into account when calculating the size. Note that 178 * the maximum number of descriptors (256) occupies 2048 bytes, 179 * so we allocate that much regardless of the number of descriptors. 180 */ 181 size = (HME_XD_SIZE * HME_RX_RING_MAX) + /* RX descriptors */ 182 (HME_XD_SIZE * HME_TX_RING_MAX); /* TX descriptors */ 183 184 /* Allocate DMA buffer */ 185 if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg, 186 BUS_DMA_NOWAIT)) != 0) { 187 printf("\n%s: DMA buffer alloc error %d\n", 188 sc->sc_dev.dv_xname, error); 189 return; 190 } 191 192 /* Map DMA memory in CPU addressable space */ 193 if ((error = bus_dmamem_map(dmatag, &seg, rseg, size, 194 &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 195 printf("\n%s: DMA buffer map error %d\n", 196 sc->sc_dev.dv_xname, error); 197 bus_dmamap_unload(dmatag, sc->sc_dmamap); 198 bus_dmamem_free(dmatag, &seg, rseg); 199 return; 200 } 201 202 if ((error = bus_dmamap_create(dmatag, size, 1, size, 0, 203 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) { 204 printf("\n%s: DMA map create error %d\n", 205 sc->sc_dev.dv_xname, error); 206 return; 207 } 208 209 /* Load the buffer */ 210 if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap, 211 sc->sc_rb.rb_membase, size, NULL, 212 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 213 printf("\n%s: DMA buffer map load error %d\n", 214 sc->sc_dev.dv_xname, error); 215 bus_dmamem_free(dmatag, &seg, rseg); 216 return; 217 } 218 sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr; 219 220 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 221 222 /* Initialize ifnet structure. */ 223 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname); 224 ifp->if_softc = sc; 225 ifp->if_start = hme_start; 226 ifp->if_ioctl = hme_ioctl; 227 ifp->if_watchdog = hme_watchdog; 228 ifp->if_flags = 229 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; 230 IFQ_SET_READY(&ifp->if_snd); 231 ifp->if_capabilities = IFCAP_VLAN_MTU; 232 233 m_clsetwms(ifp, MCLBYTES, 0, HME_RX_RING_SIZE); 234 235 /* Initialize ifmedia structures and MII info */ 236 mii->mii_ifp = ifp; 237 mii->mii_readreg = hme_mii_readreg; 238 mii->mii_writereg = hme_mii_writereg; 239 mii->mii_statchg = hme_mii_statchg; 240 241 ifmedia_init(&mii->mii_media, IFM_IMASK, 242 hme_mediachange, hme_mediastatus); 243 244 hme_mifinit(sc); 245 246 if (sc->sc_tcvr == -1) 247 mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 248 MII_OFFSET_ANY, 0); 249 else 250 mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr, 251 MII_OFFSET_ANY, 0); 252 253 child = LIST_FIRST(&mii->mii_phys); 254 if (child == NULL) { 255 /* No PHY attached */ 256 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 257 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); 258 } else { 259 /* 260 * Walk along the list of attached MII devices and 261 * establish an `MII instance' to `phy number' 262 * mapping. We'll use this mapping in media change 263 * requests to determine which phy to use to program 264 * the MIF configuration register. 265 */ 266 for (; child != NULL; child = LIST_NEXT(child, mii_list)) { 267 /* 268 * Note: we support just two PHYs: the built-in 269 * internal device and an external on the MII 270 * connector. 271 */ 272 if (child->mii_phy > 1 || child->mii_inst > 1) { 273 printf("%s: cannot accommodate MII device %s" 274 " at phy %d, instance %d\n", 275 sc->sc_dev.dv_xname, 276 child->mii_dev.dv_xname, 277 child->mii_phy, child->mii_inst); 278 continue; 279 } 280 281 sc->sc_phys[child->mii_inst] = child->mii_phy; 282 } 283 284 /* 285 * XXX - we can really do the following ONLY if the 286 * phy indeed has the auto negotiation capability!! 287 */ 288 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO); 289 } 290 291 /* Attach the interface. */ 292 if_attach(ifp); 293 ether_ifattach(ifp); 294 295 timeout_set(&sc->sc_tick_ch, hme_tick, sc); 296 return; 297 298 fail: 299 if (sc->sc_rxmap_spare != NULL) 300 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare); 301 for (i = 0; i < HME_TX_RING_SIZE; i++) 302 if (sc->sc_txd[i].sd_map != NULL) 303 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map); 304 for (i = 0; i < HME_RX_RING_SIZE; i++) 305 if (sc->sc_rxd[i].sd_map != NULL) 306 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map); 307 } 308 309 void 310 hme_unconfig(sc) 311 struct hme_softc *sc; 312 { 313 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 314 int i; 315 316 hme_stop(sc, 1); 317 318 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare); 319 for (i = 0; i < HME_TX_RING_SIZE; i++) 320 if (sc->sc_txd[i].sd_map != NULL) 321 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map); 322 for (i = 0; i < HME_RX_RING_SIZE; i++) 323 if (sc->sc_rxd[i].sd_map != NULL) 324 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map); 325 326 /* Detach all PHYs */ 327 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 328 329 /* Delete all remaining media. */ 330 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 331 332 ether_ifdetach(ifp); 333 if_detach(ifp); 334 } 335 336 void 337 hme_tick(arg) 338 void *arg; 339 { 340 struct hme_softc *sc = arg; 341 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 342 bus_space_tag_t t = sc->sc_bustag; 343 bus_space_handle_t mac = sc->sc_mac; 344 int s; 345 346 s = splnet(); 347 /* 348 * Unload collision counters 349 */ 350 ifp->if_collisions += 351 bus_space_read_4(t, mac, HME_MACI_NCCNT) + 352 bus_space_read_4(t, mac, HME_MACI_FCCNT) + 353 bus_space_read_4(t, mac, HME_MACI_EXCNT) + 354 bus_space_read_4(t, mac, HME_MACI_LTCNT); 355 356 /* 357 * then clear the hardware counters. 358 */ 359 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0); 360 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0); 361 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0); 362 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0); 363 364 /* 365 * If buffer allocation fails, the receive ring may become 366 * empty. There is no receive interrupt to recover from that. 367 */ 368 if (sc->sc_rx_cnt == 0) 369 hme_fill_rx_ring(sc); 370 371 mii_tick(&sc->sc_mii); 372 splx(s); 373 374 timeout_add_sec(&sc->sc_tick_ch, 1); 375 } 376 377 void 378 hme_reset(sc) 379 struct hme_softc *sc; 380 { 381 int s; 382 383 s = splnet(); 384 hme_init(sc); 385 splx(s); 386 } 387 388 void 389 hme_stop(struct hme_softc *sc, int softonly) 390 { 391 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 392 bus_space_tag_t t = sc->sc_bustag; 393 bus_space_handle_t seb = sc->sc_seb; 394 int n; 395 396 timeout_del(&sc->sc_tick_ch); 397 398 /* 399 * Mark the interface down and cancel the watchdog timer. 400 */ 401 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 402 ifp->if_timer = 0; 403 404 if (!softonly) { 405 mii_down(&sc->sc_mii); 406 407 /* Mask all interrupts */ 408 bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff); 409 410 /* Reset transmitter and receiver */ 411 bus_space_write_4(t, seb, HME_SEBI_RESET, 412 (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)); 413 414 for (n = 0; n < 20; n++) { 415 u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET); 416 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0) 417 break; 418 DELAY(20); 419 } 420 if (n >= 20) 421 printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname); 422 } 423 424 for (n = 0; n < HME_TX_RING_SIZE; n++) { 425 if (sc->sc_txd[n].sd_mbuf != NULL) { 426 bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map, 427 0, sc->sc_txd[n].sd_map->dm_mapsize, 428 BUS_DMASYNC_POSTWRITE); 429 bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map); 430 m_freem(sc->sc_txd[n].sd_mbuf); 431 sc->sc_txd[n].sd_mbuf = NULL; 432 } 433 } 434 sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0; 435 436 for (n = 0; n < HME_RX_RING_SIZE; n++) { 437 if (sc->sc_rxd[n].sd_mbuf != NULL) { 438 bus_dmamap_sync(sc->sc_dmatag, sc->sc_rxd[n].sd_map, 439 0, sc->sc_rxd[n].sd_map->dm_mapsize, 440 BUS_DMASYNC_POSTREAD); 441 bus_dmamap_unload(sc->sc_dmatag, sc->sc_rxd[n].sd_map); 442 m_freem(sc->sc_rxd[n].sd_mbuf); 443 sc->sc_rxd[n].sd_mbuf = NULL; 444 } 445 } 446 sc->sc_rx_prod = sc->sc_rx_cons = sc->sc_rx_cnt = 0; 447 } 448 449 void 450 hme_meminit(sc) 451 struct hme_softc *sc; 452 { 453 bus_addr_t dma; 454 caddr_t p; 455 unsigned int i; 456 struct hme_ring *hr = &sc->sc_rb; 457 458 p = hr->rb_membase; 459 dma = hr->rb_dmabase; 460 461 /* 462 * Allocate transmit descriptors 463 */ 464 hr->rb_txd = p; 465 hr->rb_txddma = dma; 466 p += HME_TX_RING_SIZE * HME_XD_SIZE; 467 dma += HME_TX_RING_SIZE * HME_XD_SIZE; 468 /* We have reserved descriptor space until the next 2048 byte boundary.*/ 469 dma = (bus_addr_t)roundup((u_long)dma, 2048); 470 p = (caddr_t)roundup((u_long)p, 2048); 471 472 /* 473 * Allocate receive descriptors 474 */ 475 hr->rb_rxd = p; 476 hr->rb_rxddma = dma; 477 p += HME_RX_RING_SIZE * HME_XD_SIZE; 478 dma += HME_RX_RING_SIZE * HME_XD_SIZE; 479 /* Again move forward to the next 2048 byte boundary.*/ 480 dma = (bus_addr_t)roundup((u_long)dma, 2048); 481 p = (caddr_t)roundup((u_long)p, 2048); 482 483 /* 484 * Initialize transmit descriptors 485 */ 486 for (i = 0; i < HME_TX_RING_SIZE; i++) { 487 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0); 488 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0); 489 sc->sc_txd[i].sd_mbuf = NULL; 490 } 491 492 /* 493 * Initialize receive descriptors 494 */ 495 for (i = 0; i < HME_RX_RING_SIZE; i++) { 496 HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, 0); 497 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, 0); 498 sc->sc_rxd[i].sd_mbuf = NULL; 499 } 500 501 hme_fill_rx_ring(sc); 502 } 503 504 /* 505 * Initialization of interface; set up initialization block 506 * and transmit/receive descriptor rings. 507 */ 508 void 509 hme_init(sc) 510 struct hme_softc *sc; 511 { 512 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 513 bus_space_tag_t t = sc->sc_bustag; 514 bus_space_handle_t seb = sc->sc_seb; 515 bus_space_handle_t etx = sc->sc_etx; 516 bus_space_handle_t erx = sc->sc_erx; 517 bus_space_handle_t mac = sc->sc_mac; 518 u_int8_t *ea; 519 u_int32_t v, n; 520 521 /* 522 * Initialization sequence. The numbered steps below correspond 523 * to the sequence outlined in section 6.3.5.1 in the Ethernet 524 * Channel Engine manual (part of the PCIO manual). 525 * See also the STP2002-STQ document from Sun Microsystems. 526 */ 527 528 /* step 1 & 2. Reset the Ethernet Channel */ 529 hme_stop(sc, 0); 530 531 /* Re-initialize the MIF */ 532 hme_mifinit(sc); 533 534 /* Call MI reset function if any */ 535 if (sc->sc_hwreset) 536 (*sc->sc_hwreset)(sc); 537 538 #if 0 539 /* Mask all MIF interrupts, just in case */ 540 bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff); 541 #endif 542 543 /* step 3. Setup data structures in host memory */ 544 hme_meminit(sc); 545 546 /* step 4. TX MAC registers & counters */ 547 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0); 548 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0); 549 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0); 550 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0); 551 bus_space_write_4(t, mac, HME_MACI_TXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); 552 553 /* Load station MAC address */ 554 ea = sc->sc_arpcom.ac_enaddr; 555 bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]); 556 bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]); 557 bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]); 558 559 /* 560 * Init seed for backoff 561 * (source suggested by manual: low 10 bits of MAC address) 562 */ 563 v = ((ea[4] << 8) | ea[5]) & 0x3fff; 564 bus_space_write_4(t, mac, HME_MACI_RANDSEED, v); 565 566 567 /* Note: Accepting power-on default for other MAC registers here.. */ 568 569 570 /* step 5. RX MAC registers & counters */ 571 hme_iff(sc); 572 573 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 574 bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma); 575 bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE); 576 577 bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma); 578 bus_space_write_4(t, mac, HME_MACI_RXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); 579 580 /* step 8. Global Configuration & Interrupt Mask */ 581 bus_space_write_4(t, seb, HME_SEBI_IMASK, 582 ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST | 583 HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR | 584 HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS)); 585 586 switch (sc->sc_burst) { 587 default: 588 v = 0; 589 break; 590 case 16: 591 v = HME_SEB_CFG_BURST16; 592 break; 593 case 32: 594 v = HME_SEB_CFG_BURST32; 595 break; 596 case 64: 597 v = HME_SEB_CFG_BURST64; 598 break; 599 } 600 bus_space_write_4(t, seb, HME_SEBI_CFG, v); 601 602 /* step 9. ETX Configuration: use mostly default values */ 603 604 /* Enable DMA */ 605 v = bus_space_read_4(t, etx, HME_ETXI_CFG); 606 v |= HME_ETX_CFG_DMAENABLE; 607 bus_space_write_4(t, etx, HME_ETXI_CFG, v); 608 609 /* Transmit Descriptor ring size: in increments of 16 */ 610 bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1); 611 612 /* step 10. ERX Configuration */ 613 v = bus_space_read_4(t, erx, HME_ERXI_CFG); 614 v &= ~HME_ERX_CFG_RINGSIZE256; 615 #if HME_RX_RING_SIZE == 32 616 v |= HME_ERX_CFG_RINGSIZE32; 617 #elif HME_RX_RING_SIZE == 64 618 v |= HME_ERX_CFG_RINGSIZE64; 619 #elif HME_RX_RING_SIZE == 128 620 v |= HME_ERX_CFG_RINGSIZE128; 621 #elif HME_RX_RING_SIZE == 256 622 v |= HME_ERX_CFG_RINGSIZE256; 623 #else 624 # error "RX ring size must be 32, 64, 128, or 256" 625 #endif 626 /* Enable DMA */ 627 v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3); 628 /* RX TCP/UDP cksum offset */ 629 n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2; 630 n = (n << HME_ERX_CFG_CSUM_SHIFT) & HME_ERX_CFG_CSUMSTART; 631 v |= n; 632 bus_space_write_4(t, erx, HME_ERXI_CFG, v); 633 634 /* step 11. XIF Configuration */ 635 v = bus_space_read_4(t, mac, HME_MACI_XIF); 636 v |= HME_MAC_XIF_OE; 637 bus_space_write_4(t, mac, HME_MACI_XIF, v); 638 639 /* step 12. RX_MAC Configuration Register */ 640 v = bus_space_read_4(t, mac, HME_MACI_RXCFG); 641 v |= HME_MAC_RXCFG_ENABLE; 642 bus_space_write_4(t, mac, HME_MACI_RXCFG, v); 643 644 /* step 13. TX_MAC Configuration Register */ 645 v = bus_space_read_4(t, mac, HME_MACI_TXCFG); 646 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP); 647 bus_space_write_4(t, mac, HME_MACI_TXCFG, v); 648 649 /* step 14. Issue Transmit Pending command */ 650 651 /* Call MI initialization function if any */ 652 if (sc->sc_hwinit) 653 (*sc->sc_hwinit)(sc); 654 655 /* Set the current media. */ 656 mii_mediachg(&sc->sc_mii); 657 658 /* Start the one second timer. */ 659 timeout_add_sec(&sc->sc_tick_ch, 1); 660 661 ifp->if_flags |= IFF_RUNNING; 662 ifp->if_flags &= ~IFF_OACTIVE; 663 664 hme_start(ifp); 665 } 666 667 void 668 hme_start(ifp) 669 struct ifnet *ifp; 670 { 671 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc; 672 struct hme_ring *hr = &sc->sc_rb; 673 struct mbuf *m; 674 u_int32_t flags; 675 bus_dmamap_t map; 676 u_int32_t frag, cur, i; 677 int error; 678 679 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 680 return; 681 682 while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) { 683 IFQ_POLL(&ifp->if_snd, m); 684 if (m == NULL) 685 break; 686 687 /* 688 * Encapsulate this packet and start it going... 689 * or fail... 690 */ 691 692 cur = frag = sc->sc_tx_prod; 693 map = sc->sc_txd[cur].sd_map; 694 695 error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m, 696 BUS_DMA_NOWAIT); 697 if (error != 0 && error != EFBIG) 698 goto drop; 699 if (error != 0) { 700 /* Too many fragments, linearize. */ 701 if (m_defrag(m, M_DONTWAIT)) 702 goto drop; 703 error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m, 704 BUS_DMA_NOWAIT); 705 if (error != 0) 706 goto drop; 707 } 708 709 if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + map->dm_nsegs)) < 5) { 710 bus_dmamap_unload(sc->sc_dmatag, map); 711 ifp->if_flags |= IFF_OACTIVE; 712 break; 713 } 714 715 /* We are now committed to transmitting the packet. */ 716 IFQ_DEQUEUE(&ifp->if_snd, m); 717 718 #if NBPFILTER > 0 719 /* 720 * If BPF is listening on this interface, let it see the 721 * packet before we commit it to the wire. 722 */ 723 if (ifp->if_bpf) 724 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 725 #endif 726 727 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, 728 BUS_DMASYNC_PREWRITE); 729 730 for (i = 0; i < map->dm_nsegs; i++) { 731 flags = HME_XD_ENCODE_TSIZE(map->dm_segs[i].ds_len); 732 if (i == 0) 733 flags |= HME_XD_SOP; 734 else 735 flags |= HME_XD_OWN; 736 737 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag, 738 map->dm_segs[i].ds_addr); 739 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags); 740 741 cur = frag; 742 if (++frag == HME_TX_RING_SIZE) 743 frag = 0; 744 } 745 746 /* Set end of packet on last descriptor. */ 747 flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur); 748 flags |= HME_XD_EOP; 749 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags); 750 751 sc->sc_tx_cnt += map->dm_nsegs; 752 sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map; 753 sc->sc_txd[cur].sd_map = map; 754 sc->sc_txd[cur].sd_mbuf = m; 755 756 /* Give first frame over to the hardware. */ 757 flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod); 758 flags |= HME_XD_OWN; 759 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod, flags); 760 761 bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING, 762 HME_ETX_TP_DMAWAKEUP); 763 sc->sc_tx_prod = frag; 764 765 ifp->if_timer = 5; 766 } 767 768 return; 769 770 drop: 771 IFQ_DEQUEUE(&ifp->if_snd, m); 772 m_freem(m); 773 ifp->if_oerrors++; 774 } 775 776 /* 777 * Transmit interrupt. 778 */ 779 int 780 hme_tint(sc) 781 struct hme_softc *sc; 782 { 783 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 784 unsigned int ri, txflags; 785 struct hme_sxd *sd; 786 int cnt = sc->sc_tx_cnt; 787 788 /* Fetch current position in the transmit ring */ 789 ri = sc->sc_tx_cons; 790 sd = &sc->sc_txd[ri]; 791 792 for (;;) { 793 if (cnt <= 0) 794 break; 795 796 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri); 797 798 if (txflags & HME_XD_OWN) 799 break; 800 801 ifp->if_flags &= ~IFF_OACTIVE; 802 if (txflags & HME_XD_EOP) 803 ifp->if_opackets++; 804 805 if (sd->sd_mbuf != NULL) { 806 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 807 0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 808 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map); 809 m_freem(sd->sd_mbuf); 810 sd->sd_mbuf = NULL; 811 } 812 813 if (++ri == HME_TX_RING_SIZE) { 814 ri = 0; 815 sd = sc->sc_txd; 816 } else 817 sd++; 818 819 --cnt; 820 } 821 822 sc->sc_tx_cnt = cnt; 823 ifp->if_timer = cnt > 0 ? 5 : 0; 824 825 /* Update ring */ 826 sc->sc_tx_cons = ri; 827 828 hme_start(ifp); 829 830 return (1); 831 } 832 833 /* 834 * XXX layering violation 835 * 836 * If we can have additional csum data member in 'struct pkthdr' for 837 * these incomplete checksum offload capable hardware, things would be 838 * much simpler. That member variable will carry partial checksum 839 * data and it may be evaluated in TCP/UDP input handler after 840 * computing pseudo header checksumming. 841 */ 842 void 843 hme_rxcksum(struct mbuf *m, u_int32_t flags) 844 { 845 struct ether_header *eh; 846 struct ip *ip; 847 struct udphdr *uh; 848 int32_t hlen, len, pktlen; 849 u_int16_t cksum, *opts; 850 u_int32_t temp32; 851 union pseudoh { 852 struct hdr { 853 u_int16_t len; 854 u_int8_t ttl; 855 u_int8_t proto; 856 u_int32_t src; 857 u_int32_t dst; 858 } h; 859 u_int16_t w[6]; 860 } ph; 861 862 pktlen = m->m_pkthdr.len; 863 if (pktlen < sizeof(struct ether_header)) 864 return; 865 eh = mtod(m, struct ether_header *); 866 if (eh->ether_type != htons(ETHERTYPE_IP)) 867 return; 868 ip = (struct ip *)(eh + 1); 869 if (ip->ip_v != IPVERSION) 870 return; 871 872 hlen = ip->ip_hl << 2; 873 pktlen -= sizeof(struct ether_header); 874 if (hlen < sizeof(struct ip)) 875 return; 876 if (ntohs(ip->ip_len) < hlen) 877 return; 878 if (ntohs(ip->ip_len) != pktlen) 879 return; 880 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 881 return; /* can't handle fragmented packet */ 882 883 switch (ip->ip_p) { 884 case IPPROTO_TCP: 885 if (pktlen < (hlen + sizeof(struct tcphdr))) 886 return; 887 break; 888 case IPPROTO_UDP: 889 if (pktlen < (hlen + sizeof(struct udphdr))) 890 return; 891 uh = (struct udphdr *)((caddr_t)ip + hlen); 892 if (uh->uh_sum == 0) 893 return; /* no checksum */ 894 break; 895 default: 896 return; 897 } 898 899 cksum = htons(~(flags & HME_XD_RXCKSUM)); 900 /* cksum fixup for IP options */ 901 len = hlen - sizeof(struct ip); 902 if (len > 0) { 903 opts = (u_int16_t *)(ip + 1); 904 for (; len > 0; len -= sizeof(u_int16_t), opts++) { 905 temp32 = cksum - *opts; 906 temp32 = (temp32 >> 16) + (temp32 & 65535); 907 cksum = temp32 & 65535; 908 } 909 } 910 /* cksum fixup for pseudo-header, replace with in_cksum_phdr()? */ 911 ph.h.len = htons(ntohs(ip->ip_len) - hlen); 912 ph.h.ttl = 0; 913 ph.h.proto = ip->ip_p; 914 ph.h.src = ip->ip_src.s_addr; 915 ph.h.dst = ip->ip_dst.s_addr; 916 temp32 = cksum; 917 opts = &ph.w[0]; 918 temp32 += opts[0] + opts[1] + opts[2] + opts[3] + opts[4] + opts[5]; 919 temp32 = (temp32 >> 16) + (temp32 & 65535); 920 temp32 += (temp32 >> 16); 921 cksum = ~temp32; 922 if (cksum == 0) { 923 m->m_pkthdr.csum_flags |= 924 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK; 925 } 926 } 927 928 /* 929 * Receive interrupt. 930 */ 931 int 932 hme_rint(sc) 933 struct hme_softc *sc; 934 { 935 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 936 struct mbuf *m; 937 struct hme_sxd *sd; 938 unsigned int ri, len; 939 u_int32_t flags; 940 941 ri = sc->sc_rx_cons; 942 sd = &sc->sc_rxd[ri]; 943 944 /* 945 * Process all buffers with valid data. 946 */ 947 while (sc->sc_rx_cnt > 0) { 948 flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri); 949 if (flags & HME_XD_OWN) 950 break; 951 952 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 953 0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 954 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map); 955 956 m = sd->sd_mbuf; 957 sd->sd_mbuf = NULL; 958 959 if (++ri == HME_RX_RING_SIZE) { 960 ri = 0; 961 sd = sc->sc_rxd; 962 } else 963 sd++; 964 sc->sc_rx_cnt--; 965 966 if (flags & HME_XD_OFL) { 967 ifp->if_ierrors++; 968 printf("%s: buffer overflow, ri=%d; flags=0x%x\n", 969 sc->sc_dev.dv_xname, ri, flags); 970 m_freem(m); 971 continue; 972 } 973 974 len = HME_XD_DECODE_RSIZE(flags); 975 m->m_pkthdr.len = m->m_len = len; 976 977 ifp->if_ipackets++; 978 hme_rxcksum(m, flags); 979 980 #if NBPFILTER > 0 981 if (ifp->if_bpf) 982 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 983 #endif 984 985 ether_input_mbuf(ifp, m); 986 } 987 988 sc->sc_rx_cons = ri; 989 hme_fill_rx_ring(sc); 990 return (1); 991 } 992 993 int 994 hme_eint(sc, status) 995 struct hme_softc *sc; 996 u_int status; 997 { 998 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 999 1000 if (status & HME_SEB_STAT_MIFIRQ) { 1001 printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname); 1002 status &= ~HME_SEB_STAT_MIFIRQ; 1003 } 1004 1005 if (status & HME_SEB_STAT_DTIMEXP) { 1006 ifp->if_oerrors++; 1007 status &= ~HME_SEB_STAT_DTIMEXP; 1008 } 1009 1010 if (status & HME_SEB_STAT_NORXD) { 1011 ifp->if_ierrors++; 1012 status &= ~HME_SEB_STAT_NORXD; 1013 } 1014 1015 status &= ~(HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_GOTFRAME | 1016 HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_HOSTTOTX | 1017 HME_SEB_STAT_TXALL); 1018 1019 if (status == 0) 1020 return (1); 1021 1022 #ifdef HME_DEBUG 1023 printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, HME_SEB_STAT_BITS); 1024 #endif 1025 return (1); 1026 } 1027 1028 int 1029 hme_intr(v) 1030 void *v; 1031 { 1032 struct hme_softc *sc = (struct hme_softc *)v; 1033 bus_space_tag_t t = sc->sc_bustag; 1034 bus_space_handle_t seb = sc->sc_seb; 1035 u_int32_t status; 1036 int r = 0; 1037 1038 status = bus_space_read_4(t, seb, HME_SEBI_STAT); 1039 if (status == 0xffffffff) 1040 return (0); 1041 1042 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0) 1043 r |= hme_eint(sc, status); 1044 1045 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0) 1046 r |= hme_tint(sc); 1047 1048 if ((status & HME_SEB_STAT_RXTOHOST) != 0) 1049 r |= hme_rint(sc); 1050 1051 return (r); 1052 } 1053 1054 1055 void 1056 hme_watchdog(ifp) 1057 struct ifnet *ifp; 1058 { 1059 struct hme_softc *sc = ifp->if_softc; 1060 1061 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname); 1062 ifp->if_oerrors++; 1063 1064 hme_reset(sc); 1065 } 1066 1067 /* 1068 * Initialize the MII Management Interface 1069 */ 1070 void 1071 hme_mifinit(sc) 1072 struct hme_softc *sc; 1073 { 1074 bus_space_tag_t t = sc->sc_bustag; 1075 bus_space_handle_t mif = sc->sc_mif; 1076 bus_space_handle_t mac = sc->sc_mac; 1077 int phy; 1078 u_int32_t v; 1079 1080 v = bus_space_read_4(t, mif, HME_MIFI_CFG); 1081 phy = HME_PHYAD_EXTERNAL; 1082 if (v & HME_MIF_CFG_MDI1) 1083 phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL; 1084 else if (v & HME_MIF_CFG_MDI0) 1085 phy = sc->sc_tcvr = HME_PHYAD_INTERNAL; 1086 else 1087 sc->sc_tcvr = -1; 1088 1089 /* Configure the MIF in frame mode, no poll, current phy select */ 1090 v = 0; 1091 if (phy == HME_PHYAD_EXTERNAL) 1092 v |= HME_MIF_CFG_PHY; 1093 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1094 1095 /* If an external transceiver is selected, enable its MII drivers */ 1096 v = bus_space_read_4(t, mac, HME_MACI_XIF); 1097 v &= ~HME_MAC_XIF_MIIENABLE; 1098 if (phy == HME_PHYAD_EXTERNAL) 1099 v |= HME_MAC_XIF_MIIENABLE; 1100 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1101 } 1102 1103 /* 1104 * MII interface 1105 */ 1106 static int 1107 hme_mii_readreg(self, phy, reg) 1108 struct device *self; 1109 int phy, reg; 1110 { 1111 struct hme_softc *sc = (struct hme_softc *)self; 1112 bus_space_tag_t t = sc->sc_bustag; 1113 bus_space_handle_t mif = sc->sc_mif; 1114 bus_space_handle_t mac = sc->sc_mac; 1115 u_int32_t v, xif_cfg, mifi_cfg; 1116 int n; 1117 1118 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL) 1119 return (0); 1120 1121 /* Select the desired PHY in the MIF configuration register */ 1122 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG); 1123 v &= ~HME_MIF_CFG_PHY; 1124 if (phy == HME_PHYAD_EXTERNAL) 1125 v |= HME_MIF_CFG_PHY; 1126 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1127 1128 /* Enable MII drivers on external transceiver */ 1129 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF); 1130 if (phy == HME_PHYAD_EXTERNAL) 1131 v |= HME_MAC_XIF_MIIENABLE; 1132 else 1133 v &= ~HME_MAC_XIF_MIIENABLE; 1134 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1135 1136 /* Construct the frame command */ 1137 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1138 HME_MIF_FO_TAMSB | 1139 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) | 1140 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1141 (reg << HME_MIF_FO_REGAD_SHIFT); 1142 1143 bus_space_write_4(t, mif, HME_MIFI_FO, v); 1144 for (n = 0; n < 100; n++) { 1145 DELAY(1); 1146 v = bus_space_read_4(t, mif, HME_MIFI_FO); 1147 if (v & HME_MIF_FO_TALSB) { 1148 v &= HME_MIF_FO_DATA; 1149 goto out; 1150 } 1151 } 1152 1153 v = 0; 1154 printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname); 1155 1156 out: 1157 /* Restore MIFI_CFG register */ 1158 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg); 1159 /* Restore XIF register */ 1160 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg); 1161 return (v); 1162 } 1163 1164 static void 1165 hme_mii_writereg(self, phy, reg, val) 1166 struct device *self; 1167 int phy, reg, val; 1168 { 1169 struct hme_softc *sc = (void *)self; 1170 bus_space_tag_t t = sc->sc_bustag; 1171 bus_space_handle_t mif = sc->sc_mif; 1172 bus_space_handle_t mac = sc->sc_mac; 1173 u_int32_t v, xif_cfg, mifi_cfg; 1174 int n; 1175 1176 /* We can at most have two PHYs */ 1177 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL) 1178 return; 1179 1180 /* Select the desired PHY in the MIF configuration register */ 1181 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG); 1182 v &= ~HME_MIF_CFG_PHY; 1183 if (phy == HME_PHYAD_EXTERNAL) 1184 v |= HME_MIF_CFG_PHY; 1185 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1186 1187 /* Enable MII drivers on external transceiver */ 1188 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF); 1189 if (phy == HME_PHYAD_EXTERNAL) 1190 v |= HME_MAC_XIF_MIIENABLE; 1191 else 1192 v &= ~HME_MAC_XIF_MIIENABLE; 1193 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1194 1195 /* Construct the frame command */ 1196 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1197 HME_MIF_FO_TAMSB | 1198 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) | 1199 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1200 (reg << HME_MIF_FO_REGAD_SHIFT) | 1201 (val & HME_MIF_FO_DATA); 1202 1203 bus_space_write_4(t, mif, HME_MIFI_FO, v); 1204 for (n = 0; n < 100; n++) { 1205 DELAY(1); 1206 v = bus_space_read_4(t, mif, HME_MIFI_FO); 1207 if (v & HME_MIF_FO_TALSB) 1208 goto out; 1209 } 1210 1211 printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname); 1212 out: 1213 /* Restore MIFI_CFG register */ 1214 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg); 1215 /* Restore XIF register */ 1216 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg); 1217 } 1218 1219 static void 1220 hme_mii_statchg(dev) 1221 struct device *dev; 1222 { 1223 struct hme_softc *sc = (void *)dev; 1224 bus_space_tag_t t = sc->sc_bustag; 1225 bus_space_handle_t mac = sc->sc_mac; 1226 u_int32_t v; 1227 1228 #ifdef HMEDEBUG 1229 if (sc->sc_debug) 1230 printf("hme_mii_statchg: status change\n", phy); 1231 #endif 1232 1233 /* Set the MAC Full Duplex bit appropriately */ 1234 /* Apparently the hme chip is SIMPLEX if working in full duplex mode, 1235 but not otherwise. */ 1236 v = bus_space_read_4(t, mac, HME_MACI_TXCFG); 1237 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) { 1238 v |= HME_MAC_TXCFG_FULLDPLX; 1239 sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX; 1240 } else { 1241 v &= ~HME_MAC_TXCFG_FULLDPLX; 1242 sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX; 1243 } 1244 bus_space_write_4(t, mac, HME_MACI_TXCFG, v); 1245 } 1246 1247 int 1248 hme_mediachange(ifp) 1249 struct ifnet *ifp; 1250 { 1251 struct hme_softc *sc = ifp->if_softc; 1252 bus_space_tag_t t = sc->sc_bustag; 1253 bus_space_handle_t mif = sc->sc_mif; 1254 bus_space_handle_t mac = sc->sc_mac; 1255 int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media); 1256 int phy = sc->sc_phys[instance]; 1257 u_int32_t v; 1258 1259 #ifdef HMEDEBUG 1260 if (sc->sc_debug) 1261 printf("hme_mediachange: phy = %d\n", phy); 1262 #endif 1263 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER) 1264 return (EINVAL); 1265 1266 /* Select the current PHY in the MIF configuration register */ 1267 v = bus_space_read_4(t, mif, HME_MIFI_CFG); 1268 v &= ~HME_MIF_CFG_PHY; 1269 if (phy == HME_PHYAD_EXTERNAL) 1270 v |= HME_MIF_CFG_PHY; 1271 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1272 1273 /* If an external transceiver is selected, enable its MII drivers */ 1274 v = bus_space_read_4(t, mac, HME_MACI_XIF); 1275 v &= ~HME_MAC_XIF_MIIENABLE; 1276 if (phy == HME_PHYAD_EXTERNAL) 1277 v |= HME_MAC_XIF_MIIENABLE; 1278 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1279 1280 return (mii_mediachg(&sc->sc_mii)); 1281 } 1282 1283 void 1284 hme_mediastatus(ifp, ifmr) 1285 struct ifnet *ifp; 1286 struct ifmediareq *ifmr; 1287 { 1288 struct hme_softc *sc = ifp->if_softc; 1289 1290 if ((ifp->if_flags & IFF_UP) == 0) 1291 return; 1292 1293 mii_pollstat(&sc->sc_mii); 1294 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1295 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1296 } 1297 1298 /* 1299 * Process an ioctl request. 1300 */ 1301 int 1302 hme_ioctl(ifp, cmd, data) 1303 struct ifnet *ifp; 1304 u_long cmd; 1305 caddr_t data; 1306 { 1307 struct hme_softc *sc = ifp->if_softc; 1308 struct ifaddr *ifa = (struct ifaddr *)data; 1309 struct ifreq *ifr = (struct ifreq *)data; 1310 int s, error = 0; 1311 1312 s = splnet(); 1313 1314 switch (cmd) { 1315 case SIOCSIFADDR: 1316 ifp->if_flags |= IFF_UP; 1317 if (!(ifp->if_flags & IFF_RUNNING)) 1318 hme_init(sc); 1319 #ifdef INET 1320 if (ifa->ifa_addr->sa_family == AF_INET) 1321 arp_ifinit(&sc->sc_arpcom, ifa); 1322 #endif 1323 break; 1324 1325 case SIOCSIFFLAGS: 1326 if (ifp->if_flags & IFF_UP) { 1327 if (ifp->if_flags & IFF_RUNNING) 1328 error = ENETRESET; 1329 else 1330 hme_init(sc); 1331 } else { 1332 if (ifp->if_flags & IFF_RUNNING) 1333 hme_stop(sc, 0); 1334 } 1335 #ifdef HMEDEBUG 1336 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0; 1337 #endif 1338 break; 1339 1340 case SIOCGIFMEDIA: 1341 case SIOCSIFMEDIA: 1342 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 1343 break; 1344 1345 default: 1346 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 1347 } 1348 1349 if (error == ENETRESET) { 1350 if (ifp->if_flags & IFF_RUNNING) 1351 hme_iff(sc); 1352 error = 0; 1353 } 1354 1355 splx(s); 1356 return (error); 1357 } 1358 1359 void 1360 hme_iff(struct hme_softc *sc) 1361 { 1362 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1363 struct arpcom *ac = &sc->sc_arpcom; 1364 struct ether_multi *enm; 1365 struct ether_multistep step; 1366 bus_space_tag_t t = sc->sc_bustag; 1367 bus_space_handle_t mac = sc->sc_mac; 1368 u_int32_t hash[4]; 1369 u_int32_t rxcfg, crc; 1370 1371 rxcfg = bus_space_read_4(t, mac, HME_MACI_RXCFG); 1372 rxcfg &= ~(HME_MAC_RXCFG_HENABLE | HME_MAC_RXCFG_PMISC); 1373 ifp->if_flags &= ~IFF_ALLMULTI; 1374 /* Clear hash table */ 1375 hash[0] = hash[1] = hash[2] = hash[3] = 0; 1376 1377 if (ifp->if_flags & IFF_PROMISC) { 1378 ifp->if_flags |= IFF_ALLMULTI; 1379 rxcfg |= HME_MAC_RXCFG_PMISC; 1380 } else if (ac->ac_multirangecnt > 0) { 1381 ifp->if_flags |= IFF_ALLMULTI; 1382 rxcfg |= HME_MAC_RXCFG_HENABLE; 1383 hash[0] = hash[1] = hash[2] = hash[3] = 0xffff; 1384 } else { 1385 rxcfg |= HME_MAC_RXCFG_HENABLE; 1386 1387 ETHER_FIRST_MULTI(step, ac, enm); 1388 while (enm != NULL) { 1389 crc = ether_crc32_le(enm->enm_addrlo, 1390 ETHER_ADDR_LEN) >> 26; 1391 1392 /* Set the corresponding bit in the filter. */ 1393 hash[crc >> 4] |= 1 << (crc & 0xf); 1394 1395 ETHER_NEXT_MULTI(step, enm); 1396 } 1397 } 1398 1399 /* Now load the hash table into the chip */ 1400 bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]); 1401 bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]); 1402 bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]); 1403 bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]); 1404 bus_space_write_4(t, mac, HME_MACI_RXCFG, rxcfg); 1405 } 1406 1407 void 1408 hme_fill_rx_ring(sc) 1409 struct hme_softc *sc; 1410 { 1411 struct hme_sxd *sd; 1412 1413 while (sc->sc_rx_cnt < HME_RX_RING_SIZE) { 1414 if (hme_newbuf(sc, &sc->sc_rxd[sc->sc_rx_prod])) 1415 break; 1416 1417 sd = &sc->sc_rxd[sc->sc_rx_prod]; 1418 HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod, 1419 sd->sd_map->dm_segs[0].ds_addr); 1420 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod, 1421 HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE)); 1422 1423 if (++sc->sc_rx_prod == HME_RX_RING_SIZE) 1424 sc->sc_rx_prod = 0; 1425 sc->sc_rx_cnt++; 1426 } 1427 } 1428 1429 int 1430 hme_newbuf(sc, d) 1431 struct hme_softc *sc; 1432 struct hme_sxd *d; 1433 { 1434 struct mbuf *m; 1435 bus_dmamap_t map; 1436 1437 /* 1438 * All operations should be on local variables and/or rx spare map 1439 * until we're sure everything is a success. 1440 */ 1441 1442 m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES); 1443 if (!m) 1444 return (ENOBUFS); 1445 m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if; 1446 1447 if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare, 1448 mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL, 1449 BUS_DMA_NOWAIT) != 0) { 1450 m_freem(m); 1451 return (ENOBUFS); 1452 } 1453 1454 /* 1455 * At this point we have a new buffer loaded into the spare map. 1456 * Just need to clear out the old mbuf/map and put the new one 1457 * in place. 1458 */ 1459 1460 map = d->sd_map; 1461 d->sd_map = sc->sc_rxmap_spare; 1462 sc->sc_rxmap_spare = map; 1463 1464 bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize, 1465 BUS_DMASYNC_PREREAD); 1466 1467 m->m_data += HME_RX_OFFSET; 1468 d->sd_mbuf = m; 1469 return (0); 1470 } 1471