1 /*- 2 * Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca> 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 18 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 20 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 21 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 
24 * 25 * $FreeBSD$ 26 */ 27 28 /* 29 * Allwinner Gigabit Ethernet MAC (EMAC) controller 30 */ 31 32 #include "opt_device_polling.h" 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/bus.h> 40 #include <sys/rman.h> 41 #include <sys/kernel.h> 42 #include <sys/endian.h> 43 #include <sys/mbuf.h> 44 #include <sys/socket.h> 45 #include <sys/sockio.h> 46 #include <sys/module.h> 47 #include <sys/gpio.h> 48 49 #include <net/bpf.h> 50 #include <net/if.h> 51 #include <net/ethernet.h> 52 #include <net/if_dl.h> 53 #include <net/if_media.h> 54 #include <net/if_types.h> 55 #include <net/if_var.h> 56 57 #include <machine/bus.h> 58 59 #include <dev/ofw/ofw_bus.h> 60 #include <dev/ofw/ofw_bus_subr.h> 61 62 #include <arm/allwinner/if_awgreg.h> 63 #include <arm/allwinner/aw_sid.h> 64 #include <dev/mii/mii.h> 65 #include <dev/mii/miivar.h> 66 67 #include <dev/extres/clk/clk.h> 68 #include <dev/extres/hwreset/hwreset.h> 69 #include <dev/extres/regulator/regulator.h> 70 #include <dev/extres/syscon/syscon.h> 71 72 #include "syscon_if.h" 73 #include "miibus_if.h" 74 #include "gpio_if.h" 75 76 #define RD4(sc, reg) bus_read_4((sc)->res[_RES_EMAC], (reg)) 77 #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_EMAC], (reg), (val)) 78 79 #define AWG_LOCK(sc) mtx_lock(&(sc)->mtx) 80 #define AWG_UNLOCK(sc) mtx_unlock(&(sc)->mtx); 81 #define AWG_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) 82 #define AWG_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) 83 84 #define DESC_ALIGN 4 85 #define TX_DESC_COUNT 1024 86 #define TX_DESC_SIZE (sizeof(struct emac_desc) * TX_DESC_COUNT) 87 #define RX_DESC_COUNT 256 88 #define RX_DESC_SIZE (sizeof(struct emac_desc) * RX_DESC_COUNT) 89 90 #define DESC_OFF(n) ((n) * sizeof(struct emac_desc)) 91 #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1)) 92 #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1)) 93 #define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1)) 94 95 
#define TX_MAX_SEGS 20 96 97 #define SOFT_RST_RETRY 1000 98 #define MII_BUSY_RETRY 1000 99 #define MDIO_FREQ 2500000 100 101 #define BURST_LEN_DEFAULT 8 102 #define RX_TX_PRI_DEFAULT 0 103 #define PAUSE_TIME_DEFAULT 0x400 104 #define TX_INTERVAL_DEFAULT 64 105 #define RX_BATCH_DEFAULT 64 106 107 /* syscon EMAC clock register */ 108 #define EMAC_CLK_REG 0x30 109 #define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */ 110 #define EMAC_CLK_EPHY_ADDR_SHIFT 20 111 #define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */ 112 #define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */ 113 #define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */ 114 #define EMAC_CLK_RMII_EN (1 << 13) 115 #define EMAC_CLK_ETXDC (0x7 << 10) 116 #define EMAC_CLK_ETXDC_SHIFT 10 117 #define EMAC_CLK_ERXDC (0x1f << 5) 118 #define EMAC_CLK_ERXDC_SHIFT 5 119 #define EMAC_CLK_PIT (0x1 << 2) 120 #define EMAC_CLK_PIT_MII (0 << 2) 121 #define EMAC_CLK_PIT_RGMII (1 << 2) 122 #define EMAC_CLK_SRC (0x3 << 0) 123 #define EMAC_CLK_SRC_MII (0 << 0) 124 #define EMAC_CLK_SRC_EXT_RGMII (1 << 0) 125 #define EMAC_CLK_SRC_RGMII (2 << 0) 126 127 /* Burst length of RX and TX DMA transfers */ 128 static int awg_burst_len = BURST_LEN_DEFAULT; 129 TUNABLE_INT("hw.awg.burst_len", &awg_burst_len); 130 131 /* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. 
*/ 132 static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT; 133 TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri); 134 135 /* Pause time field in the transmitted control frame */ 136 static int awg_pause_time = PAUSE_TIME_DEFAULT; 137 TUNABLE_INT("hw.awg.pause_time", &awg_pause_time); 138 139 /* Request a TX interrupt every <n> descriptors */ 140 static int awg_tx_interval = TX_INTERVAL_DEFAULT; 141 TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval); 142 143 /* Maximum number of mbufs to send to if_input */ 144 static int awg_rx_batch = RX_BATCH_DEFAULT; 145 TUNABLE_INT("hw.awg.rx_batch", &awg_rx_batch); 146 147 enum awg_type { 148 EMAC_A83T = 1, 149 EMAC_H3, 150 EMAC_A64, 151 }; 152 153 static struct ofw_compat_data compat_data[] = { 154 { "allwinner,sun8i-a83t-emac", EMAC_A83T }, 155 { "allwinner,sun8i-h3-emac", EMAC_H3 }, 156 { "allwinner,sun50i-a64-emac", EMAC_A64 }, 157 { NULL, 0 } 158 }; 159 160 struct awg_bufmap { 161 bus_dmamap_t map; 162 struct mbuf *mbuf; 163 }; 164 165 struct awg_txring { 166 bus_dma_tag_t desc_tag; 167 bus_dmamap_t desc_map; 168 struct emac_desc *desc_ring; 169 bus_addr_t desc_ring_paddr; 170 bus_dma_tag_t buf_tag; 171 struct awg_bufmap buf_map[TX_DESC_COUNT]; 172 u_int cur, next, queued; 173 u_int segs; 174 }; 175 176 struct awg_rxring { 177 bus_dma_tag_t desc_tag; 178 bus_dmamap_t desc_map; 179 struct emac_desc *desc_ring; 180 bus_addr_t desc_ring_paddr; 181 bus_dma_tag_t buf_tag; 182 struct awg_bufmap buf_map[RX_DESC_COUNT]; 183 bus_dmamap_t buf_spare_map; 184 u_int cur; 185 }; 186 187 enum { 188 _RES_EMAC, 189 _RES_IRQ, 190 _RES_SYSCON, 191 _RES_NITEMS 192 }; 193 194 struct awg_softc { 195 struct resource *res[_RES_NITEMS]; 196 struct mtx mtx; 197 if_t ifp; 198 device_t dev; 199 device_t miibus; 200 struct callout stat_ch; 201 void *ih; 202 u_int mdc_div_ratio_m; 203 int link; 204 int if_flags; 205 enum awg_type type; 206 struct syscon *syscon; 207 208 struct awg_txring tx; 209 struct awg_rxring rx; 210 }; 211 212 static struct 
resource_spec awg_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE | RF_OPTIONAL },
	{ -1, 0 }
};

static void awg_txeof(struct awg_softc *sc);

static int awg_parse_delay(device_t dev, uint32_t *tx_delay,
    uint32_t *rx_delay);
static uint32_t syscon_read_emac_clk_reg(device_t dev);
static void syscon_write_emac_clk_reg(device_t dev, uint32_t val);
static phandle_t awg_get_phy_node(device_t dev);
static bool awg_has_internal_phy(device_t dev);

/*
 * Read a PHY register through the EMAC MDIO interface.  Polls the
 * MII_BUSY bit up to MII_BUSY_RETRY times (10us apart); returns 0 on
 * timeout after logging a diagnostic.
 */
static int
awg_miibus_readreg(device_t dev, int phy, int reg)
{
	struct awg_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, EMAC_MII_CMD,
	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) {
			val = RD4(sc, EMAC_MII_DATA);
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

/*
 * Write a PHY register through the EMAC MDIO interface; same busy-poll
 * protocol as the read path.  Always returns 0 (miibus convention).
 */
static int
awg_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct awg_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, EMAC_MII_DATA, val);
	WR4(sc, EMAC_MII_CMD,
	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_WR | MII_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0)
			break;
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

/*
 * miibus link-state callback: track link up/down and, when up, program
 * the MAC speed/duplex and pause settings to match the negotiated media.
 * Called with the softc mutex held.
 */
static void
awg_miibus_statchg(device_t dev)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	uint32_t val;

	sc = device_get_softc(dev);

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_100_TX:
		case IFM_10_T:
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, EMAC_BASIC_CTL_0);
	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
	else
		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= BASIC_CTL_DUPLEX;

	WR4(sc, EMAC_BASIC_CTL_0, val);

	/* RX pause: honour received pause frames when negotiated. */
	val = RD4(sc, EMAC_RX_CTL_0);
	val &= ~RX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		val |= RX_FLOW_CTL_EN;
	WR4(sc, EMAC_RX_CTL_0, val);

	/* TX pause: pause time only applies in full duplex. */
	val = RD4(sc, EMAC_TX_FLOW_CTL);
	val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		val |= TX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= awg_pause_time << PAUSE_TIME_SHIFT;
	WR4(sc, EMAC_TX_FLOW_CTL, val);
}

/* ifmedia status callback: poll the PHY and report active media/status. */
static void
awg_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct awg_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	AWG_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AWG_UNLOCK(sc);
}

/* ifmedia change callback: forward the request to the MII layer. */
static int
awg_media_change(if_t ifp)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	AWG_LOCK(sc);
	error = mii_mediachg(mii);
	AWG_UNLOCK(sc);

	return (error);
}

/*
 * Map an mbuf chain for transmission and fill TX descriptors for it.
 * On EFBIG the chain is collapsed once and the load retried.  On any
 * failure other than ENOBUFS the mbuf is freed and *mp set to NULL;
 * ENOBUFS leaves the chain intact so the caller can requeue it.
 */
static int
awg_encap(struct awg_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, last, i;
	u_int csum_flags;
	uint32_t flags, status;
	struct mbuf *m;

	cur = first = sc->tx.cur;
	map = sc->tx.buf_map[first].map;

	m = *mp;
	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev, "awg_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev, "awg_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	if (sc->tx.queued + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->tx.buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx.buf_tag, map, BUS_DMASYNC_PREWRITE);

	flags = TX_FIR_DESC;
	status = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	for (i = 0; i < nsegs; i++) {
		sc->tx.segs++;
		if (i == nsegs - 1) {
			flags |= TX_LAST_DESC;
			/*
			 * Can only request TX completion
			 * interrupt on last descriptor.
			 */
			if (sc->tx.segs >= awg_tx_interval) {
				sc->tx.segs = 0;
				flags |= TX_INT_CTL;
			}
		}

		sc->tx.desc_ring[cur].addr = htole32((uint32_t)segs[i].ds_addr);
		sc->tx.desc_ring[cur].size = htole32(flags | segs[i].ds_len);
		sc->tx.desc_ring[cur].status = htole32(status);

		flags &= ~TX_FIR_DESC;
		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully set up.
		 */
		status = TX_DESC_CTL;

		++sc->tx.queued;
		cur = TX_NEXT(cur);
	}

	sc->tx.cur = cur;

	/* Store mapping and mbuf in the last segment */
	last = TX_SKIP(cur, TX_DESC_COUNT - 1);
	sc->tx.buf_map[first].map = sc->tx.buf_map[last].map;
	sc->tx.buf_map[last].map = map;
	sc->tx.buf_map[last].mbuf = m;

	/*
	 * The whole mbuf chain has been DMA mapped,
	 * fix the first descriptor.
	 */
	sc->tx.desc_ring[first].status = htole32(TX_DESC_CTL);

	return (0);
}

/* Unload, sync and free the mbuf attached to TX slot 'index', if any. */
static void
awg_clean_txbuf(struct awg_softc *sc, int index)
{
	struct awg_bufmap *bmap;

	--sc->tx.queued;

	bmap = &sc->tx.buf_map[index];
	if (bmap->mbuf != NULL) {
		bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
	}
}

/* Initialize RX descriptor 'index' to point at buffer 'paddr'. */
static void
awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr)
{
	uint32_t status, size;

	status = RX_DESC_CTL;
	size = MCLBYTES - 1;

	sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr);
	sc->rx.desc_ring[index].size = htole32(size);
	sc->rx.desc_ring[index].status = htole32(status);
}

/* Hand RX descriptor 'index' back to the hardware unchanged. */
static void
awg_reuse_rxdesc(struct awg_softc *sc, int index)
{

	sc->rx.desc_ring[index].status = htole32(RX_DESC_CTL);
}

/*
 * Attach a fresh mbuf cluster to RX slot 'index'.  Loads it via the
 * spare map first so the old buffer stays mapped if allocation or the
 * load fails; on success the slot map and spare map are swapped.
 */
static int
awg_newbuf_rx(struct awg_softc *sc, int index)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, sc->rx.buf_spare_map,
	    m, &seg, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->rx.buf_map[index].mbuf != NULL) {
		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);
	}
	map = sc->rx.buf_map[index].map;
	sc->rx.buf_map[index].map = sc->rx.buf_spare_map;
	sc->rx.buf_spare_map = map;
	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    BUS_DMASYNC_PREREAD);

	sc->rx.buf_map[index].mbuf = m;
	awg_setup_rxdesc(sc, index, seg.ds_addr);

	return (0);
}

/*
 * Drain the interface send queue into the TX ring and kick TX DMA.
 * Requires the softc mutex; a failed encap requeues the mbuf and stops.
 */
static void
awg_start_locked(struct awg_softc *sc)
{
	struct mbuf *m;
	uint32_t val;
	if_t ifp;
	int cnt, err;

	AWG_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = awg_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
	}

	if (cnt != 0) {
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		val = RD4(sc, EMAC_TX_CTL_1);
		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
	}
}

/* if_start entry point: take the lock and run the locked variant. */
static void
awg_start(if_t ifp)
{
	struct awg_softc *sc;

	sc = if_getsoftc(ifp);

	AWG_LOCK(sc);
	awg_start_locked(sc);
	AWG_UNLOCK(sc);
}

/*
 * Periodic (1 Hz) callout: tick the MII state machine and restart
 * transmission when the link transitions from down to up.  Runs with
 * the softc mutex held (callout is mutex-protected).
 */
static void
awg_tick(void *softc)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		awg_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * two-word hash table pointed to by 'arg'.
 */
static u_int
awg_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, hashreg, hashbit, *hash = arg;

	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7f;
	crc = bitrev32(~crc) >> 26;
	hashreg = (crc >> 5);
	hashbit = (crc & 0x1f);
	hash[hashreg] |= (1 << hashbit);

	return (1);
}

/*
 * Program the unicast address, multicast hash and RX frame filter from
 * the current interface flags and address lists.  Lock must be held.
 */
static void
awg_setup_rxfilter(struct awg_softc *sc)
{
	uint32_t val, hash[2], machi, maclo;
	uint8_t *eaddr;
	if_t ifp;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	val = 0;
	hash[0] = hash[1] = 0;

	if (if_getflags(ifp) & IFF_PROMISC)
		val |= DIS_ADDR_FILTER;
	else if (if_getflags(ifp) & IFF_ALLMULTI) {
		val |= RX_ALL_MULTICAST;
		hash[0] = hash[1] = ~0;
	} else if (if_foreach_llmaddr(ifp, awg_hash_maddr, hash) > 0)
		val |= HASH_MULTICAST;

	/* Write our unicast address */
	eaddr = IF_LLADDR(ifp);
	machi = (eaddr[5] << 8) | eaddr[4];
	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
	    (eaddr[0] << 0);
	WR4(sc, EMAC_ADDR_HIGH(0), machi);
	WR4(sc, EMAC_ADDR_LOW(0), maclo);

	/* Multicast hash filters */
	WR4(sc, EMAC_RX_HASH_0, hash[1]);
	WR4(sc, EMAC_RX_HASH_1, hash[0]);

	/* RX frame filter config */
	WR4(sc, EMAC_RX_FRM_FLT, val);
}

/* One-time MAC core setup: DMA burst length and RX/TX priority. */
static void
awg_setup_core(struct awg_softc *sc)
{
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);
	/* Configure DMA burst length and priorities */
	val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
	if (awg_rx_tx_pri)
		val |= BASIC_CTL_RX_TX_PRI;
	WR4(sc, EMAC_BASIC_CTL_1, val);
}

/* Enable or disable the MAC transmitter and receiver. */
static void
awg_enable_mac(struct awg_softc *sc, bool enable)
{
	uint32_t tx, rx;

	AWG_ASSERT_LOCKED(sc);

	tx = RD4(sc, EMAC_TX_CTL_0);
	rx = RD4(sc, EMAC_RX_CTL_0);
	if (enable) {
		tx |= TX_EN;
		rx |= RX_EN | CHECK_CRC;
	} else {
		tx &= ~TX_EN;
		rx &= ~(RX_EN | CHECK_CRC);
	}

	WR4(sc, EMAC_TX_CTL_0, tx);
	WR4(sc, EMAC_RX_CTL_0, rx);
}

static void
awg_enable_dma_intr(struct awg_softc *sc)
{
	/* Enable interrupts */
	WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN);
}

static void
awg_disable_dma_intr(struct awg_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, EMAC_INT_EN, 0);
}

/*
 * Enable TX/RX DMA engines; interrupts are left disabled when polling
 * is active (DEVICE_POLLING build option).
 */
static void
awg_init_dma(struct awg_softc *sc)
{
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	if ((if_getcapenable(sc->ifp) & IFCAP_POLLING) == 0)
		awg_enable_dma_intr(sc);
	else
		awg_disable_dma_intr(sc);
#else
	awg_enable_dma_intr(sc);
#endif

	/* Enable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME);

	/* Enable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);
}

/* Quiesce both DMA engines and disable DMA interrupts. */
static void
awg_stop_dma(struct awg_softc *sc)
{
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	/* Stop transmit DMA and flush data in the TX FIFO */
	val = RD4(sc, EMAC_TX_CTL_1);
	val &= ~TX_DMA_EN;
	val |= FLUSH_TX_FIFO;
	WR4(sc, EMAC_TX_CTL_1, val);

	/* Disable interrupts */
	awg_disable_dma_intr(sc);

	/* Disable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);

	/* Disable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);
}

/*
 * Bring the interface up: program filters, enable MAC and DMA, mark the
 * interface running and start the periodic tick.  Lock must be held.
 */
static void
awg_init_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	AWG_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	awg_setup_rxfilter(sc);
	awg_setup_core(sc);
	awg_enable_mac(sc, true);
	awg_init_dma(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}

/* if_init entry point: take the lock and run the locked variant. */
static void
awg_init(void *softc)
{
	struct awg_softc *sc;

	sc = softc;

	AWG_LOCK(sc);
	awg_init_locked(sc);
	AWG_UNLOCK(sc);
}

/*
 * Bring the interface down: stop DMA and the MAC, reclaim completed TX
 * buffers, release untransmitted ones and hand all RX descriptors back
 * to the hardware.  Lock must be held.
 */
static void
awg_stop(struct awg_softc *sc)
{
	if_t ifp;
	uint32_t val;
	int i;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	callout_stop(&sc->stat_ch);

	awg_stop_dma(sc);
	awg_enable_mac(sc, false);

	sc->link = 0;

	/* Finish handling transmitted buffers */
	awg_txeof(sc);

	/* Release any untransmitted buffers. */
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		val = le32toh(sc->tx.desc_ring[i].status);
		if ((val & TX_DESC_CTL) != 0)
			break;
		awg_clean_txbuf(sc, i);
	}
	sc->tx.next = i;
	for (; sc->tx.queued > 0; i = TX_NEXT(i)) {
		sc->tx.desc_ring[i].status = 0;
		awg_clean_txbuf(sc, i);
	}
	sc->tx.cur = sc->tx.next;
	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Setup RX buffers for reuse */
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = sc->rx.cur; ; i = RX_NEXT(i)) {
		val = le32toh(sc->rx.desc_ring[i].status);
		if ((val & RX_DESC_CTL) != 0)
			break;
		awg_reuse_rxdesc(sc, i);
	}
	sc->rx.cur = i;
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Harvest completed RX descriptors, replace their buffers and pass the
 * received packets up in batches of awg_rx_batch (the lock is dropped
 * around if_input).  Returns the number of packets received.
 */
static int
awg_rxintr(struct awg_softc *sc)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	int error, index, len, cnt, npkt;
	uint32_t status;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
		status = le32toh(sc->rx.desc_ring[index].status);
		if ((status & RX_DESC_CTL) != 0)
			break;

		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;

		if (len == 0) {
			if ((status & (RX_NO_ENOUGH_BUF_ERR | RX_OVERFLOW_ERR)) != 0)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			awg_reuse_rxdesc(sc, index);
			continue;
		}

		m = sc->rx.buf_map[index].mbuf;

		error = awg_newbuf_rx(sc, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			awg_reuse_rxdesc(sc, index);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		/* Translate hardware checksum status into mbuf flags. */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
		    (status & RX_FRM_TYPE) != 0) {
			m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			if ((status & RX_HEADER_ERR) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((status & RX_PAYLOAD_ERR) == 0) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		if (cnt == awg_rx_batch) {
			AWG_UNLOCK(sc);
			if_input(ifp, mh);
			AWG_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (index != sc->rx.cur) {
		bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	if (mh != NULL) {
		AWG_UNLOCK(sc);
		if_input(ifp, mh);
		AWG_LOCK(sc);
	}

	sc->rx.cur = index;

	return (npkt);
}

/*
 * Reclaim TX descriptors the hardware has finished with, updating
 * packet/error counters on last-segment descriptors and clearing
 * IFF_DRV_OACTIVE when progress was made.  Lock must be held.
 */
static void
awg_txeof(struct awg_softc *sc)
{
	struct emac_desc *desc;
	uint32_t status, size;
	if_t ifp;
	int i, prog;

	AWG_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->ifp;

	prog = 0;
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		if ((status & TX_DESC_CTL) != 0)
			break;
		size = le32toh(desc->size);
		if (size & TX_LAST_DESC) {
			if ((status & (TX_HEADER_ERR | TX_PAYLOAD_ERR)) != 0)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			else
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		prog++;
		awg_clean_txbuf(sc, i);
	}

	if (prog > 0) {
		sc->tx.next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}
}

/* Interrupt handler: ack status, then service RX and TX completions. */
static void
awg_intr(void *arg)
{
	struct awg_softc *sc;
	uint32_t val;

	sc = arg;

	AWG_LOCK(sc);
	val = RD4(sc, EMAC_INT_STA);
	WR4(sc, EMAC_INT_STA, val);	/* write-to-clear pending bits */

	if (val & RX_INT)
		awg_rxintr(sc);

	if (val & TX_INT)
		awg_txeof(sc);

	if (val & (TX_INT | TX_BUF_UA_INT)) {
		if (!if_sendq_empty(sc->ifp))
			awg_start_locked(sc);
	}

	AWG_UNLOCK(sc);
}

#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING handler: service RX/TX without interrupts; status
 * register is checked/cleared only on POLL_AND_CHECK_STATUS.
 * Returns the number of packets received.
 */
static int
awg_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct awg_softc *sc;
	uint32_t val;
	int rx_npkts;

	sc = if_getsoftc(ifp);
	rx_npkts = 0;

	AWG_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		AWG_UNLOCK(sc);
		return (0);
	}

	rx_npkts = awg_rxintr(sc);
	awg_txeof(sc);
	if (!if_sendq_empty(ifp))
		awg_start_locked(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		val = RD4(sc, EMAC_INT_STA);
		if (val != 0)
			WR4(sc, EMAC_INT_STA, val);
	}

	AWG_UNLOCK(sc);

	return (rx_npkts);
}
#endif

/* ioctl handler: interface flags, multicast, media and capabilities. */
static int
awg_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, mask, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		AWG_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Only reprogram the filter on a relevant change. */
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					awg_setup_rxfilter(sc);
			} else
				awg_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				awg_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		AWG_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			AWG_LOCK(sc);
			awg_setup_rxfilter(sc);
			AWG_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(awg_poll, ifp);
				if (error != 0)
					break;
				AWG_LOCK(sc);
				awg_disable_dma_intr(sc);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				AWG_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				AWG_LOCK(sc);
				awg_enable_dma_intr(sc);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				AWG_UNLOCK(sc);
			}
		}
#endif
		if (mask & IFCAP_VLAN_MTU)
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * Read the syscon EMAC clock register, via the syscon node when present
 * or the legacy in-node syscon memory resource otherwise; 0 if neither.
 */
static uint32_t
syscon_read_emac_clk_reg(device_t dev)
{
	struct awg_softc *sc;

	sc = device_get_softc(dev);
	if (sc->syscon != NULL)
		return (SYSCON_READ_4(sc->syscon, EMAC_CLK_REG));
	else if (sc->res[_RES_SYSCON] != NULL)
		return (bus_read_4(sc->res[_RES_SYSCON], 0));

	return (0);
}

/* Write the syscon EMAC clock register (same fallback as the read path). */
static void
syscon_write_emac_clk_reg(device_t dev, uint32_t val)
{
	struct awg_softc *sc;

	sc = device_get_softc(dev);
	if (sc->syscon != NULL)
		SYSCON_WRITE_4(sc->syscon, EMAC_CLK_REG, val);
	else if (sc->res[_RES_SYSCON] != NULL)
		bus_write_4(sc->res[_RES_SYSCON], 0, val);
}

static phandle_t 1197 awg_get_phy_node(device_t dev) 1198 { 1199 phandle_t node; 1200 pcell_t phy_handle; 1201 1202 node = ofw_bus_get_node(dev); 1203 if (OF_getencprop(node, "phy-handle", (void *)&phy_handle, 1204 sizeof(phy_handle)) <= 0) 1205 return (0); 1206 1207 return (OF_node_from_xref(phy_handle)); 1208 } 1209 1210 static bool 1211 awg_has_internal_phy(device_t dev) 1212 { 1213 phandle_t node, phy_node; 1214 1215 node = ofw_bus_get_node(dev); 1216 /* Legacy binding */ 1217 if (OF_hasprop(node, "allwinner,use-internal-phy")) 1218 return (true); 1219 1220 phy_node = awg_get_phy_node(dev); 1221 return (phy_node != 0 && ofw_bus_node_is_compatible(OF_parent(phy_node), 1222 "allwinner,sun8i-h3-mdio-internal") != 0); 1223 } 1224 1225 static int 1226 awg_parse_delay(device_t dev, uint32_t *tx_delay, uint32_t *rx_delay) 1227 { 1228 phandle_t node; 1229 uint32_t delay; 1230 1231 if (tx_delay == NULL || rx_delay == NULL) 1232 return (EINVAL); 1233 *tx_delay = *rx_delay = 0; 1234 node = ofw_bus_get_node(dev); 1235 1236 if (OF_getencprop(node, "tx-delay", &delay, sizeof(delay)) >= 0) 1237 *tx_delay = delay; 1238 else if (OF_getencprop(node, "allwinner,tx-delay-ps", &delay, 1239 sizeof(delay)) >= 0) { 1240 if ((delay % 100) != 0) { 1241 device_printf(dev, "tx-delay-ps is not a multiple of 100\n"); 1242 return (EDOM); 1243 } 1244 *tx_delay = delay / 100; 1245 } 1246 if (*tx_delay > 7) { 1247 device_printf(dev, "tx-delay out of range\n"); 1248 return (ERANGE); 1249 } 1250 1251 if (OF_getencprop(node, "rx-delay", &delay, sizeof(delay)) >= 0) 1252 *rx_delay = delay; 1253 else if (OF_getencprop(node, "allwinner,rx-delay-ps", &delay, 1254 sizeof(delay)) >= 0) { 1255 if ((delay % 100) != 0) { 1256 device_printf(dev, "rx-delay-ps is not within documented domain\n"); 1257 return (EDOM); 1258 } 1259 *rx_delay = delay / 100; 1260 } 1261 if (*rx_delay > 31) { 1262 device_printf(dev, "rx-delay out of range\n"); 1263 return (ERANGE); 1264 } 1265 1266 return (0); 1267 } 1268 1269 
static int 1270 awg_setup_phy(device_t dev) 1271 { 1272 struct awg_softc *sc; 1273 clk_t clk_tx, clk_tx_parent; 1274 const char *tx_parent_name; 1275 char *phy_type; 1276 phandle_t node; 1277 uint32_t reg, tx_delay, rx_delay; 1278 int error; 1279 bool use_syscon; 1280 1281 sc = device_get_softc(dev); 1282 node = ofw_bus_get_node(dev); 1283 use_syscon = false; 1284 1285 if (OF_getprop_alloc(node, "phy-mode", (void **)&phy_type) == 0) 1286 return (0); 1287 1288 if (sc->syscon != NULL || sc->res[_RES_SYSCON] != NULL) 1289 use_syscon = true; 1290 1291 if (bootverbose) 1292 device_printf(dev, "PHY type: %s, conf mode: %s\n", phy_type, 1293 use_syscon ? "reg" : "clk"); 1294 1295 if (use_syscon) { 1296 /* 1297 * Abstract away writing to syscon for devices like the pine64. 1298 * For the pine64, we get dtb from U-Boot and it still uses the 1299 * legacy setup of specifying syscon register in emac node 1300 * rather than as its own node and using an xref in emac. 1301 * These abstractions can go away once U-Boot dts is up-to-date. 1302 */ 1303 reg = syscon_read_emac_clk_reg(dev); 1304 reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN); 1305 if (strncmp(phy_type, "rgmii", 5) == 0) 1306 reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII; 1307 else if (strcmp(phy_type, "rmii") == 0) 1308 reg |= EMAC_CLK_RMII_EN; 1309 else 1310 reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII; 1311 1312 /* 1313 * Fail attach if we fail to parse either of the delay 1314 * parameters. If we don't have the proper delay to write to 1315 * syscon, then awg likely won't function properly anyways. 1316 * Lack of delay is not an error! 1317 */ 1318 error = awg_parse_delay(dev, &tx_delay, &rx_delay); 1319 if (error != 0) 1320 goto fail; 1321 1322 /* Default to 0 and we'll increase it if we need to. 
*/ 1323 reg &= ~(EMAC_CLK_ETXDC | EMAC_CLK_ERXDC); 1324 if (tx_delay > 0) 1325 reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT); 1326 if (rx_delay > 0) 1327 reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT); 1328 1329 if (sc->type == EMAC_H3) { 1330 if (awg_has_internal_phy(dev)) { 1331 reg |= EMAC_CLK_EPHY_SELECT; 1332 reg &= ~EMAC_CLK_EPHY_SHUTDOWN; 1333 if (OF_hasprop(node, 1334 "allwinner,leds-active-low")) 1335 reg |= EMAC_CLK_EPHY_LED_POL; 1336 else 1337 reg &= ~EMAC_CLK_EPHY_LED_POL; 1338 1339 /* Set internal PHY addr to 1 */ 1340 reg &= ~EMAC_CLK_EPHY_ADDR; 1341 reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT); 1342 } else { 1343 reg &= ~EMAC_CLK_EPHY_SELECT; 1344 } 1345 } 1346 1347 if (bootverbose) 1348 device_printf(dev, "EMAC clock: 0x%08x\n", reg); 1349 syscon_write_emac_clk_reg(dev, reg); 1350 } else { 1351 if (strncmp(phy_type, "rgmii", 5) == 0) 1352 tx_parent_name = "emac_int_tx"; 1353 else 1354 tx_parent_name = "mii_phy_tx"; 1355 1356 /* Get the TX clock */ 1357 error = clk_get_by_ofw_name(dev, 0, "tx", &clk_tx); 1358 if (error != 0) { 1359 device_printf(dev, "cannot get tx clock\n"); 1360 goto fail; 1361 } 1362 1363 /* Find the desired parent clock based on phy-mode property */ 1364 error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent); 1365 if (error != 0) { 1366 device_printf(dev, "cannot get clock '%s'\n", 1367 tx_parent_name); 1368 goto fail; 1369 } 1370 1371 /* Set TX clock parent */ 1372 error = clk_set_parent_by_clk(clk_tx, clk_tx_parent); 1373 if (error != 0) { 1374 device_printf(dev, "cannot set tx clock parent\n"); 1375 goto fail; 1376 } 1377 1378 /* Enable TX clock */ 1379 error = clk_enable(clk_tx); 1380 if (error != 0) { 1381 device_printf(dev, "cannot enable tx clock\n"); 1382 goto fail; 1383 } 1384 } 1385 1386 error = 0; 1387 1388 fail: 1389 OF_prop_free(phy_type); 1390 return (error); 1391 } 1392 1393 static int 1394 awg_setup_extres(device_t dev) 1395 { 1396 struct awg_softc *sc; 1397 phandle_t node, phy_node; 1398 hwreset_t rst_ahb, rst_ephy; 1399 
clk_t clk_ahb, clk_ephy; 1400 regulator_t reg; 1401 uint64_t freq; 1402 int error, div; 1403 1404 sc = device_get_softc(dev); 1405 rst_ahb = rst_ephy = NULL; 1406 clk_ahb = clk_ephy = NULL; 1407 reg = NULL; 1408 node = ofw_bus_get_node(dev); 1409 phy_node = awg_get_phy_node(dev); 1410 1411 if (phy_node == 0 && OF_hasprop(node, "phy-handle")) { 1412 error = ENXIO; 1413 device_printf(dev, "cannot get phy handle\n"); 1414 goto fail; 1415 } 1416 1417 /* Get AHB clock and reset resources */ 1418 error = hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst_ahb); 1419 if (error != 0) 1420 error = hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb); 1421 if (error != 0) { 1422 device_printf(dev, "cannot get ahb reset\n"); 1423 goto fail; 1424 } 1425 if (hwreset_get_by_ofw_name(dev, 0, "ephy", &rst_ephy) != 0) 1426 if (phy_node == 0 || hwreset_get_by_ofw_idx(dev, phy_node, 0, 1427 &rst_ephy) != 0) 1428 rst_ephy = NULL; 1429 error = clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk_ahb); 1430 if (error != 0) 1431 error = clk_get_by_ofw_name(dev, 0, "ahb", &clk_ahb); 1432 if (error != 0) { 1433 device_printf(dev, "cannot get ahb clock\n"); 1434 goto fail; 1435 } 1436 if (clk_get_by_ofw_name(dev, 0, "ephy", &clk_ephy) != 0) 1437 if (phy_node == 0 || clk_get_by_ofw_index(dev, phy_node, 0, 1438 &clk_ephy) != 0) 1439 clk_ephy = NULL; 1440 1441 if (OF_hasprop(node, "syscon") && syscon_get_by_ofw_property(dev, node, 1442 "syscon", &sc->syscon) != 0) { 1443 device_printf(dev, "cannot get syscon driver handle\n"); 1444 goto fail; 1445 } 1446 1447 /* Configure PHY for MII or RGMII mode */ 1448 if (awg_setup_phy(dev) != 0) 1449 goto fail; 1450 1451 /* Enable clocks */ 1452 error = clk_enable(clk_ahb); 1453 if (error != 0) { 1454 device_printf(dev, "cannot enable ahb clock\n"); 1455 goto fail; 1456 } 1457 if (clk_ephy != NULL) { 1458 error = clk_enable(clk_ephy); 1459 if (error != 0) { 1460 device_printf(dev, "cannot enable ephy clock\n"); 1461 goto fail; 1462 } 1463 } 1464 1465 /* De-assert 
reset */ 1466 error = hwreset_deassert(rst_ahb); 1467 if (error != 0) { 1468 device_printf(dev, "cannot de-assert ahb reset\n"); 1469 goto fail; 1470 } 1471 if (rst_ephy != NULL) { 1472 /* 1473 * The ephy reset is left de-asserted by U-Boot. Assert it 1474 * here to make sure that we're in a known good state going 1475 * into the PHY reset. 1476 */ 1477 hwreset_assert(rst_ephy); 1478 error = hwreset_deassert(rst_ephy); 1479 if (error != 0) { 1480 device_printf(dev, "cannot de-assert ephy reset\n"); 1481 goto fail; 1482 } 1483 } 1484 1485 /* Enable PHY regulator if applicable */ 1486 if (regulator_get_by_ofw_property(dev, 0, "phy-supply", ®) == 0) { 1487 error = regulator_enable(reg); 1488 if (error != 0) { 1489 device_printf(dev, "cannot enable PHY regulator\n"); 1490 goto fail; 1491 } 1492 } 1493 1494 /* Determine MDC clock divide ratio based on AHB clock */ 1495 error = clk_get_freq(clk_ahb, &freq); 1496 if (error != 0) { 1497 device_printf(dev, "cannot get AHB clock frequency\n"); 1498 goto fail; 1499 } 1500 div = freq / MDIO_FREQ; 1501 if (div <= 16) 1502 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16; 1503 else if (div <= 32) 1504 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32; 1505 else if (div <= 64) 1506 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64; 1507 else if (div <= 128) 1508 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128; 1509 else { 1510 device_printf(dev, "cannot determine MDC clock divide ratio\n"); 1511 error = ENXIO; 1512 goto fail; 1513 } 1514 1515 if (bootverbose) 1516 device_printf(dev, "AHB frequency %ju Hz, MDC div: 0x%x\n", 1517 (uintmax_t)freq, sc->mdc_div_ratio_m); 1518 1519 return (0); 1520 1521 fail: 1522 if (reg != NULL) 1523 regulator_release(reg); 1524 if (clk_ephy != NULL) 1525 clk_release(clk_ephy); 1526 if (clk_ahb != NULL) 1527 clk_release(clk_ahb); 1528 if (rst_ephy != NULL) 1529 hwreset_release(rst_ephy); 1530 if (rst_ahb != NULL) 1531 hwreset_release(rst_ahb); 1532 return (error); 1533 } 1534 1535 static void 1536 awg_get_eaddr(device_t dev, 
uint8_t *eaddr) 1537 { 1538 struct awg_softc *sc; 1539 uint32_t maclo, machi, rnd; 1540 u_char rootkey[16]; 1541 uint32_t rootkey_size; 1542 1543 sc = device_get_softc(dev); 1544 1545 machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff; 1546 maclo = RD4(sc, EMAC_ADDR_LOW(0)); 1547 1548 rootkey_size = sizeof(rootkey); 1549 if (maclo == 0xffffffff && machi == 0xffff) { 1550 /* MAC address in hardware is invalid, create one */ 1551 if (aw_sid_get_fuse(AW_SID_FUSE_ROOTKEY, rootkey, 1552 &rootkey_size) == 0 && 1553 (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] | 1554 rootkey[15]) != 0) { 1555 /* MAC address is derived from the root key in SID */ 1556 maclo = (rootkey[13] << 24) | (rootkey[12] << 16) | 1557 (rootkey[3] << 8) | 0x02; 1558 machi = (rootkey[15] << 8) | rootkey[14]; 1559 } else { 1560 /* Create one */ 1561 rnd = arc4random(); 1562 maclo = 0x00f2 | (rnd & 0xffff0000); 1563 machi = rnd & 0xffff; 1564 } 1565 } 1566 1567 eaddr[0] = maclo & 0xff; 1568 eaddr[1] = (maclo >> 8) & 0xff; 1569 eaddr[2] = (maclo >> 16) & 0xff; 1570 eaddr[3] = (maclo >> 24) & 0xff; 1571 eaddr[4] = machi & 0xff; 1572 eaddr[5] = (machi >> 8) & 0xff; 1573 } 1574 1575 #ifdef AWG_DEBUG 1576 static void 1577 awg_dump_regs(device_t dev) 1578 { 1579 static const struct { 1580 const char *name; 1581 u_int reg; 1582 } regs[] = { 1583 { "BASIC_CTL_0", EMAC_BASIC_CTL_0 }, 1584 { "BASIC_CTL_1", EMAC_BASIC_CTL_1 }, 1585 { "INT_STA", EMAC_INT_STA }, 1586 { "INT_EN", EMAC_INT_EN }, 1587 { "TX_CTL_0", EMAC_TX_CTL_0 }, 1588 { "TX_CTL_1", EMAC_TX_CTL_1 }, 1589 { "TX_FLOW_CTL", EMAC_TX_FLOW_CTL }, 1590 { "TX_DMA_LIST", EMAC_TX_DMA_LIST }, 1591 { "RX_CTL_0", EMAC_RX_CTL_0 }, 1592 { "RX_CTL_1", EMAC_RX_CTL_1 }, 1593 { "RX_DMA_LIST", EMAC_RX_DMA_LIST }, 1594 { "RX_FRM_FLT", EMAC_RX_FRM_FLT }, 1595 { "RX_HASH_0", EMAC_RX_HASH_0 }, 1596 { "RX_HASH_1", EMAC_RX_HASH_1 }, 1597 { "MII_CMD", EMAC_MII_CMD }, 1598 { "ADDR_HIGH0", EMAC_ADDR_HIGH(0) }, 1599 { "ADDR_LOW0", EMAC_ADDR_LOW(0) }, 1600 { "TX_DMA_STA", 
EMAC_TX_DMA_STA }, 1601 { "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC }, 1602 { "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF }, 1603 { "RX_DMA_STA", EMAC_RX_DMA_STA }, 1604 { "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC }, 1605 { "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF }, 1606 { "RGMII_STA", EMAC_RGMII_STA }, 1607 }; 1608 struct awg_softc *sc; 1609 unsigned int n; 1610 1611 sc = device_get_softc(dev); 1612 1613 for (n = 0; n < nitems(regs); n++) 1614 device_printf(dev, " %-20s %08x\n", regs[n].name, 1615 RD4(sc, regs[n].reg)); 1616 } 1617 #endif 1618 1619 #define GPIO_ACTIVE_LOW 1 1620 1621 static int 1622 awg_phy_reset(device_t dev) 1623 { 1624 pcell_t gpio_prop[4], delay_prop[3]; 1625 phandle_t node, gpio_node; 1626 device_t gpio; 1627 uint32_t pin, flags; 1628 uint32_t pin_value; 1629 1630 node = ofw_bus_get_node(dev); 1631 if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop, 1632 sizeof(gpio_prop)) <= 0) 1633 return (0); 1634 1635 if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop, 1636 sizeof(delay_prop)) <= 0) 1637 return (ENXIO); 1638 1639 gpio_node = OF_node_from_xref(gpio_prop[0]); 1640 if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) 1641 return (ENXIO); 1642 1643 if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1, 1644 gpio_prop + 1, &pin, &flags) != 0) 1645 return (ENXIO); 1646 1647 pin_value = GPIO_PIN_LOW; 1648 if (OF_hasprop(node, "allwinner,reset-active-low")) 1649 pin_value = GPIO_PIN_HIGH; 1650 1651 if (flags & GPIO_ACTIVE_LOW) 1652 pin_value = !pin_value; 1653 1654 GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT); 1655 GPIO_PIN_SET(gpio, pin, pin_value); 1656 DELAY(delay_prop[0]); 1657 GPIO_PIN_SET(gpio, pin, !pin_value); 1658 DELAY(delay_prop[1]); 1659 GPIO_PIN_SET(gpio, pin, pin_value); 1660 DELAY(delay_prop[2]); 1661 1662 return (0); 1663 } 1664 1665 static int 1666 awg_reset(device_t dev) 1667 { 1668 struct awg_softc *sc; 1669 int retry; 1670 1671 sc = device_get_softc(dev); 1672 1673 /* Reset PHY if necessary */ 1674 if 
(awg_phy_reset(dev) != 0) { 1675 device_printf(dev, "failed to reset PHY\n"); 1676 return (ENXIO); 1677 } 1678 1679 /* Soft reset all registers and logic */ 1680 WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST); 1681 1682 /* Wait for soft reset bit to self-clear */ 1683 for (retry = SOFT_RST_RETRY; retry > 0; retry--) { 1684 if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0) 1685 break; 1686 DELAY(10); 1687 } 1688 if (retry == 0) { 1689 device_printf(dev, "soft reset timed out\n"); 1690 #ifdef AWG_DEBUG 1691 awg_dump_regs(dev); 1692 #endif 1693 return (ETIMEDOUT); 1694 } 1695 1696 return (0); 1697 } 1698 1699 static void 1700 awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1701 { 1702 if (error != 0) 1703 return; 1704 *(bus_addr_t *)arg = segs[0].ds_addr; 1705 } 1706 1707 static int 1708 awg_setup_dma(device_t dev) 1709 { 1710 struct awg_softc *sc; 1711 int error, i; 1712 1713 sc = device_get_softc(dev); 1714 1715 /* Setup TX ring */ 1716 error = bus_dma_tag_create( 1717 bus_get_dma_tag(dev), /* Parent tag */ 1718 DESC_ALIGN, 0, /* alignment, boundary */ 1719 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1720 BUS_SPACE_MAXADDR, /* highaddr */ 1721 NULL, NULL, /* filter, filterarg */ 1722 TX_DESC_SIZE, 1, /* maxsize, nsegs */ 1723 TX_DESC_SIZE, /* maxsegsize */ 1724 0, /* flags */ 1725 NULL, NULL, /* lockfunc, lockarg */ 1726 &sc->tx.desc_tag); 1727 if (error != 0) { 1728 device_printf(dev, "cannot create TX descriptor ring tag\n"); 1729 return (error); 1730 } 1731 1732 error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring, 1733 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map); 1734 if (error != 0) { 1735 device_printf(dev, "cannot allocate TX descriptor ring\n"); 1736 return (error); 1737 } 1738 1739 error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map, 1740 sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb, 1741 &sc->tx.desc_ring_paddr, 0); 1742 if (error != 0) { 1743 device_printf(dev, "cannot load TX 
descriptor ring\n"); 1744 return (error); 1745 } 1746 1747 for (i = 0; i < TX_DESC_COUNT; i++) 1748 sc->tx.desc_ring[i].next = 1749 htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i))); 1750 1751 error = bus_dma_tag_create( 1752 bus_get_dma_tag(dev), /* Parent tag */ 1753 1, 0, /* alignment, boundary */ 1754 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1755 BUS_SPACE_MAXADDR, /* highaddr */ 1756 NULL, NULL, /* filter, filterarg */ 1757 MCLBYTES, TX_MAX_SEGS, /* maxsize, nsegs */ 1758 MCLBYTES, /* maxsegsize */ 1759 0, /* flags */ 1760 NULL, NULL, /* lockfunc, lockarg */ 1761 &sc->tx.buf_tag); 1762 if (error != 0) { 1763 device_printf(dev, "cannot create TX buffer tag\n"); 1764 return (error); 1765 } 1766 1767 sc->tx.queued = 0; 1768 for (i = 0; i < TX_DESC_COUNT; i++) { 1769 error = bus_dmamap_create(sc->tx.buf_tag, 0, 1770 &sc->tx.buf_map[i].map); 1771 if (error != 0) { 1772 device_printf(dev, "cannot create TX buffer map\n"); 1773 return (error); 1774 } 1775 } 1776 1777 /* Setup RX ring */ 1778 error = bus_dma_tag_create( 1779 bus_get_dma_tag(dev), /* Parent tag */ 1780 DESC_ALIGN, 0, /* alignment, boundary */ 1781 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1782 BUS_SPACE_MAXADDR, /* highaddr */ 1783 NULL, NULL, /* filter, filterarg */ 1784 RX_DESC_SIZE, 1, /* maxsize, nsegs */ 1785 RX_DESC_SIZE, /* maxsegsize */ 1786 0, /* flags */ 1787 NULL, NULL, /* lockfunc, lockarg */ 1788 &sc->rx.desc_tag); 1789 if (error != 0) { 1790 device_printf(dev, "cannot create RX descriptor ring tag\n"); 1791 return (error); 1792 } 1793 1794 error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring, 1795 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map); 1796 if (error != 0) { 1797 device_printf(dev, "cannot allocate RX descriptor ring\n"); 1798 return (error); 1799 } 1800 1801 error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map, 1802 sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb, 1803 &sc->rx.desc_ring_paddr, 0); 1804 if (error != 0) { 1805 
device_printf(dev, "cannot load RX descriptor ring\n"); 1806 return (error); 1807 } 1808 1809 error = bus_dma_tag_create( 1810 bus_get_dma_tag(dev), /* Parent tag */ 1811 1, 0, /* alignment, boundary */ 1812 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1813 BUS_SPACE_MAXADDR, /* highaddr */ 1814 NULL, NULL, /* filter, filterarg */ 1815 MCLBYTES, 1, /* maxsize, nsegs */ 1816 MCLBYTES, /* maxsegsize */ 1817 0, /* flags */ 1818 NULL, NULL, /* lockfunc, lockarg */ 1819 &sc->rx.buf_tag); 1820 if (error != 0) { 1821 device_printf(dev, "cannot create RX buffer tag\n"); 1822 return (error); 1823 } 1824 1825 error = bus_dmamap_create(sc->rx.buf_tag, 0, &sc->rx.buf_spare_map); 1826 if (error != 0) { 1827 device_printf(dev, 1828 "cannot create RX buffer spare map\n"); 1829 return (error); 1830 } 1831 1832 for (i = 0; i < RX_DESC_COUNT; i++) { 1833 sc->rx.desc_ring[i].next = 1834 htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(i))); 1835 1836 error = bus_dmamap_create(sc->rx.buf_tag, 0, 1837 &sc->rx.buf_map[i].map); 1838 if (error != 0) { 1839 device_printf(dev, "cannot create RX buffer map\n"); 1840 return (error); 1841 } 1842 sc->rx.buf_map[i].mbuf = NULL; 1843 error = awg_newbuf_rx(sc, i); 1844 if (error != 0) { 1845 device_printf(dev, "cannot create RX buffer\n"); 1846 return (error); 1847 } 1848 } 1849 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, 1850 BUS_DMASYNC_PREWRITE); 1851 1852 /* Write transmit and receive descriptor base address registers */ 1853 WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr); 1854 WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr); 1855 1856 return (0); 1857 } 1858 1859 static int 1860 awg_probe(device_t dev) 1861 { 1862 if (!ofw_bus_status_okay(dev)) 1863 return (ENXIO); 1864 1865 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) 1866 return (ENXIO); 1867 1868 device_set_desc(dev, "Allwinner Gigabit Ethernet"); 1869 return (BUS_PROBE_DEFAULT); 1870 } 1871 1872 static int 1873 awg_attach(device_t dev) 1874 { 1875 uint8_t 
eaddr[ETHER_ADDR_LEN]; 1876 struct awg_softc *sc; 1877 int error; 1878 1879 sc = device_get_softc(dev); 1880 sc->dev = dev; 1881 sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; 1882 1883 if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) { 1884 device_printf(dev, "cannot allocate resources for device\n"); 1885 return (ENXIO); 1886 } 1887 1888 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); 1889 callout_init_mtx(&sc->stat_ch, &sc->mtx, 0); 1890 1891 /* Setup clocks and regulators */ 1892 error = awg_setup_extres(dev); 1893 if (error != 0) 1894 return (error); 1895 1896 /* Read MAC address before resetting the chip */ 1897 awg_get_eaddr(dev, eaddr); 1898 1899 /* Soft reset EMAC core */ 1900 error = awg_reset(dev); 1901 if (error != 0) 1902 return (error); 1903 1904 /* Setup DMA descriptors */ 1905 error = awg_setup_dma(dev); 1906 if (error != 0) 1907 return (error); 1908 1909 /* Install interrupt handler */ 1910 error = bus_setup_intr(dev, sc->res[_RES_IRQ], 1911 INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih); 1912 if (error != 0) { 1913 device_printf(dev, "cannot setup interrupt handler\n"); 1914 return (error); 1915 } 1916 1917 /* Setup ethernet interface */ 1918 sc->ifp = if_alloc(IFT_ETHER); 1919 if_setsoftc(sc->ifp, sc); 1920 if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev)); 1921 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 1922 if_setstartfn(sc->ifp, awg_start); 1923 if_setioctlfn(sc->ifp, awg_ioctl); 1924 if_setinitfn(sc->ifp, awg_init); 1925 if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1); 1926 if_setsendqready(sc->ifp); 1927 if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP); 1928 if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM); 1929 if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp)); 1930 #ifdef DEVICE_POLLING 1931 if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0); 1932 #endif 1933 1934 /* Attach MII driver */ 1935 error = mii_attach(dev, &sc->miibus, 
sc->ifp, awg_media_change, 1936 awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 1937 MIIF_DOPAUSE); 1938 if (error != 0) { 1939 device_printf(dev, "cannot attach PHY\n"); 1940 return (error); 1941 } 1942 1943 /* Attach ethernet interface */ 1944 ether_ifattach(sc->ifp, eaddr); 1945 1946 return (0); 1947 } 1948 1949 static device_method_t awg_methods[] = { 1950 /* Device interface */ 1951 DEVMETHOD(device_probe, awg_probe), 1952 DEVMETHOD(device_attach, awg_attach), 1953 1954 /* MII interface */ 1955 DEVMETHOD(miibus_readreg, awg_miibus_readreg), 1956 DEVMETHOD(miibus_writereg, awg_miibus_writereg), 1957 DEVMETHOD(miibus_statchg, awg_miibus_statchg), 1958 1959 DEVMETHOD_END 1960 }; 1961 1962 static driver_t awg_driver = { 1963 "awg", 1964 awg_methods, 1965 sizeof(struct awg_softc), 1966 }; 1967 1968 static devclass_t awg_devclass; 1969 1970 DRIVER_MODULE(awg, simplebus, awg_driver, awg_devclass, 0, 0); 1971 DRIVER_MODULE(miibus, awg, miibus_driver, miibus_devclass, 0, 0); 1972 MODULE_DEPEND(awg, ether, 1, 1, 1); 1973 MODULE_DEPEND(awg, miibus, 1, 1, 1); 1974 MODULE_DEPEND(awg, aw_sid, 1, 1, 1); 1975 SIMPLEBUS_PNP_INFO(compat_data); 1976