/*	$OpenBSD: bcmgenet.c,v 1.7 2023/11/10 15:51:20 bluhm Exp $	*/
/*	$NetBSD: bcmgenet.c,v 1.3 2020/02/27 17:30:07 jmcneill Exp $	*/

/*-
 * Copyright (c) 2020 Jared McNeill <jmcneill@invisible.ca>
 * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Broadcom GENETv5
 */

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/bcmgenetreg.h>
#include <dev/ic/bcmgenetvar.h>

CTASSERT(MCLBYTES == 2048);

#ifdef GENET_DEBUG
#define DPRINTF(...)	printf(__VA_ARGS__)
#else
#define DPRINTF(...)	((void)0)
#endif
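
/*
 * Descriptor ring index arithmetic.  The masks below assume that
 * GENET_DMA_DESC_COUNT is a power of two, so "& (count - 1)" is a
 * cheap modulo for wrapping ring indices.
 */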

#define TX_SKIP(n, o)		(((n) + (o)) & (GENET_DMA_DESC_COUNT - 1))
#define TX_NEXT(n)		TX_SKIP(n, 1)
#define RX_NEXT(n)		(((n) + 1) & (GENET_DMA_DESC_COUNT - 1))

#define TX_MAX_SEGS		128
#define TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define RX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define MII_BUSY_RETRY		1000
#define GENET_MAX_MDF_FILTER	17

#define RD4(sc, reg)							\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define WR4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

struct cfdriver bse_cd = {
	NULL, "bse", DV_IFNET
};

int
genet_media_change(struct ifnet *ifp)
{
	struct genet_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
genet_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct genet_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

int
genet_mii_readreg(struct device *dev, int phy, int reg)
{
	struct genet_softc *sc = (struct genet_softc *)dev;
	int retry;

	WR4(sc, GENET_MDIO_CMD,
	    GENET_MDIO_READ | GENET_MDIO_START_BUSY |
	    __SHIFTIN(phy, GENET_MDIO_PMD) |
	    __SHIFTIN(reg, GENET_MDIO_REG));
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0)
			return RD4(sc, GENET_MDIO_CMD) & 0xffff;
		delay(10);
	}

	printf("%s: phy read timeout, phy=%d reg=%d\n",
	    sc->sc_dev.dv_xname, phy, reg);
	return 0;
}

void
genet_mii_writereg(struct device *dev, int phy, int reg, int val)
{
	struct genet_softc *sc = (struct genet_softc *)dev;
	int retry;

	WR4(sc, GENET_MDIO_CMD,
	    val | GENET_MDIO_WRITE | GENET_MDIO_START_BUSY |
	    __SHIFTIN(phy, GENET_MDIO_PMD) |
	    __SHIFTIN(reg, GENET_MDIO_REG));
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0)
			return;
		delay(10);
	}

	printf("%s: phy write timeout, phy=%d reg=%d\n",
	    sc->sc_dev.dv_xname, phy, reg);
}

void
genet_update_link(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	u_int speed;

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		speed = GENET_UMAC_CMD_SPEED_1000;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		speed = GENET_UMAC_CMD_SPEED_100;
	else
		speed = GENET_UMAC_CMD_SPEED_10;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= __SHIFTIN(speed, GENET_UMAC_CMD_SPEED);
	WR4(sc, GENET_UMAC_CMD, val);
}

void
genet_mii_statchg(struct device *self)
{
	struct genet_softc *sc = (struct genet_softc *)self;

	genet_update_link(sc);
}
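
/*
 * A TX descriptor's status word packs the control flags (SOP/EOP, CRC
 * insertion, queue tag) together with the buffer length in the
 * GENET_TX_DESC_STATUS_BUFLEN field; genet_setup_txbuf() below shows
 * how the flags are sequenced across a multi-segment packet.
 */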

void
genet_setup_txdesc(struct genet_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len)
{
	uint32_t status;

	status = flags | __SHIFTIN(len, GENET_TX_DESC_STATUS_BUFLEN);
	++sc->sc_tx.queued;

	WR4(sc, GENET_TX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_TX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
	WR4(sc, GENET_TX_DESC_STATUS(index), status);
}

int
genet_setup_txbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;

	/*
	 * XXX Hardware doesn't seem to like small fragments. For now
	 * just look at the first fragment and defrag if it is smaller
	 * than the minimum Ethernet packet size.
	 */
	if (m->m_len < ETHER_MIN_LEN - ETHER_CRC_LEN) {
		if (m_defrag(m, M_DONTWAIT))
			return 0;
	}

	error = bus_dmamap_load_mbuf(sc->sc_tx.buf_tag,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		if (m_defrag(m, M_DONTWAIT))
			return 0;
		error = bus_dmamap_load_mbuf(sc->sc_tx.buf_tag,
		    sc->sc_tx.buf_map[index].map, m,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return 0;

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	if (sc->sc_tx.queued >= GENET_DMA_DESC_COUNT - nsegs) {
		bus_dmamap_unload(sc->sc_tx.buf_tag,
		    sc->sc_tx.buf_map[index].map);
		return -1;
	}

	flags = GENET_TX_DESC_STATUS_SOP |
		GENET_TX_DESC_STATUS_CRC |
		GENET_TX_DESC_STATUS_QTAG;

	for (cur = index, i = 0; i < nsegs; i++) {
		sc->sc_tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= GENET_TX_DESC_STATUS_EOP;

		genet_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);

		if (i == 0) {
			flags &= ~GENET_TX_DESC_STATUS_SOP;
			flags &= ~GENET_TX_DESC_STATUS_CRC;
		}
		cur = TX_NEXT(cur);
	}

	bus_dmamap_sync(sc->sc_tx.buf_tag, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return nsegs;
}

void
genet_setup_rxdesc(struct genet_softc *sc, int index,
    bus_addr_t paddr, bus_size_t len)
{
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
}

int
genet_setup_rxbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_rx.buf_tag,
	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->sc_rx.buf_map[index].mbuf = m;
	genet_setup_rxdesc(sc, index,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_len);

	return 0;
}

struct mbuf *
genet_alloc_mbufcl(struct genet_softc *sc)
{
	struct mbuf *m;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return m;
}
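
/*
 * The DMA producer and consumer indices are free-running 16-bit
 * counters (hence the "& 0xffff" arithmetic); only the low bits select
 * a slot in the descriptor ring.  Keeping the full 16-bit values lets
 * the driver represent a completely full ring (pidx - cidx ==
 * RX_DESC_COUNT), which a masked slot index alone could not.
 */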

void
genet_fill_rx_ring(struct genet_softc *sc, int qid)
{
	struct mbuf *m;
	uint32_t cidx, index, total;
	u_int slots;
	int error;

	cidx = sc->sc_rx.cidx;
	total = (sc->sc_rx.pidx - cidx) & 0xffff;
	KASSERT(total <= RX_DESC_COUNT);

	index = sc->sc_rx.cidx & (RX_DESC_COUNT - 1);
	for (slots = if_rxr_get(&sc->sc_rx_ring, total);
	    slots > 0; slots--) {
		if ((m = genet_alloc_mbufcl(sc)) == NULL) {
			printf("%s: cannot allocate RX mbuf\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		error = genet_setup_rxbuf(sc, index, m);
		if (error != 0) {
			printf("%s: cannot create RX buffer\n",
			    sc->sc_dev.dv_xname);
			m_freem(m);
			break;
		}

		cidx = (cidx + 1) & 0xffff;
		index = RX_NEXT(index);
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	if (sc->sc_rx.cidx != cidx) {
		sc->sc_rx.cidx = cidx;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), sc->sc_rx.cidx);
	}

	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}

void
genet_rxtick(void *arg)
{
	genet_fill_rx_ring(arg, GENET_DMA_DEFAULT_QUEUE);
}

void
genet_enable_intr(struct genet_softc *sc)
{
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

void
genet_disable_intr(struct genet_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, 0xffffffff);
}

void
genet_tick(void *softc)
{
	struct genet_softc *sc = softc;
	struct mii_data *mii = &sc->sc_mii;
	int s = splnet();

	mii_tick(mii);
	timeout_add_sec(&sc->sc_stat_ch, 1);

	splx(s);
}

void
genet_setup_rxfilter_mdf(struct genet_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}
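
/*
 * The MAC destination filter (MDF) provides GENET_MAX_MDF_FILTER
 * exact-match slots; each 48-bit address is split across an
 * ADDR0/ADDR1 register pair (see genet_setup_rxfilter_mdf() above).
 * MDF_CTRL presumably enables slots via its high-order bits, which is
 * why the enable mask below is built from the top down.
 */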

void
genet_setup_rxfilter(struct genet_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	ETHER_FIRST_MULTI(step, ac, enm);
	for (n = 2; enm != NULL; n++)
		ETHER_NEXT_MULTI(step, enm);

	if (n > GENET_MAX_MDF_FILTER || ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		genet_setup_rxfilter_mdf(sc, 0, etherbroadcastaddr);
		genet_setup_rxfilter_mdf(sc, 1, LLADDR(ifp->if_sadl));
		ETHER_FIRST_MULTI(step, ac, enm);
		for (n = 2; enm != NULL; n++) {
			genet_setup_rxfilter_mdf(sc, n, enm->enm_addrlo);
			ETHER_NEXT_MULTI(step, enm);
		}
		mdf_ctrl = __BITS(GENET_MAX_MDF_FILTER - 1,
		    GENET_MAX_MDF_FILTER - n);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

void
genet_disable_dma(struct genet_softc *sc)
{
	uint32_t val;

	/* Disable receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* Flush data in the TX FIFO */
	WR4(sc, GENET_UMAC_TX_FLUSH, 1);
	delay(10);
	WR4(sc, GENET_UMAC_TX_FLUSH, 0);

	/* Disable transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);
}

int
genet_reset(struct genet_softc *sc)
{
	uint32_t val;

	genet_disable_dma(sc);

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	delay(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	delay(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	return 0;
}
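
/*
 * Note on the ring geometry programmed below: the descriptor area is
 * apparently addressed in 32-bit words, hence the END_ADDR value of
 * count * GENET_DMA_DESC_SIZE / 4 - 1.  Descriptor count and buffer
 * length share a single RING_BUF_SIZE register per ring.
 */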

void
genet_init_rings(struct genet_softc *sc, int qid)
{
	uint32_t val;

	/* TX ring */

	sc->sc_tx.next = 0;
	sc->sc_tx.queued = 0;
	sc->sc_tx.cidx = sc->sc_tx.pidx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), sc->sc_tx.cidx);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), sc->sc_tx.pidx);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(TX_DESC_COUNT, GENET_TX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_TX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* RX ring */

	sc->sc_rx.next = 0;
	sc->sc_rx.cidx = 0;
	sc->sc_rx.pidx = RX_DESC_COUNT;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), sc->sc_rx.pidx);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), sc->sc_rx.cidx);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(RX_DESC_COUNT, GENET_RX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_RX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    __SHIFTIN(5, GENET_RX_DMA_XON_XOFF_THRES_LO) |
	    __SHIFTIN(RX_DESC_COUNT >> 4, GENET_RX_DMA_XON_XOFF_THRES_HI));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	if_rxr_init(&sc->sc_rx_ring, 2, RX_DESC_COUNT);
	genet_fill_rx_ring(sc, qid);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

int
genet_init(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	uint8_t *enaddr = LLADDR(ifp->if_sadl);

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_TXID)
		WR4(sc, GENET_SYS_PORT_CTRL,
		    GENET_SYS_PORT_MODE_EXT_GPHY);

	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);

	/* Setup RX filter */
	genet_setup_rxfilter(sc);

	/* Setup TX/RX rings */
	genet_init_rings(sc, GENET_DMA_DEFAULT_QUEUE);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	genet_enable_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mii_mediachg(mii);
	timeout_add_sec(&sc->sc_stat_ch, 1);

	return 0;
}
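
/*
 * Teardown note: DMA and interrupts are disabled before intr_barrier()
 * is called, so once the barrier returns no interrupt handler can
 * still be touching the buffer maps that are unloaded and freed below.
 */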

void
genet_stop(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct genet_bufmap *bmap;
	int i;

	timeout_del(&sc->sc_rxto);
	timeout_del(&sc->sc_stat_ch);

	mii_down(&sc->sc_mii);

	genet_disable_dma(sc);

	/* Disable interrupts */
	genet_disable_intr(sc);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	intr_barrier(sc->sc_ih);

	/* Clean RX ring. */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		bmap = &sc->sc_rx.buf_map[i];
		if (bmap->mbuf) {
			bus_dmamap_sync(sc->sc_dmat, bmap->map, 0,
			    bmap->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}
	}

	/* Clean TX ring. */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		bmap = &sc->sc_tx.buf_map[i];
		if (bmap->mbuf) {
			bus_dmamap_sync(sc->sc_dmat, bmap->map, 0,
			    bmap->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}
	}
}

void
genet_rxintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int index, len, n;
	uint32_t status, pidx, total;

	pidx = RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)) & 0xffff;
	total = (pidx - sc->sc_rx.pidx) & 0xffff;

	DPRINTF("RX pidx=%08x total=%d\n", pidx, total);

	index = sc->sc_rx.next;
	for (n = 0; n < total; n++) {
		status = RD4(sc, GENET_RX_DESC_STATUS(index));
		len = __SHIFTOUT(status, GENET_RX_DESC_STATUS_BUFLEN);

		/* XXX check for errors */

		bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
		    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map);

		DPRINTF("RX [#%d] index=%02x status=%08x len=%d adj_len=%d\n",
		    n, index, status, len, len - ETHER_ALIGN);

		m = sc->sc_rx.buf_map[index].mbuf;
		sc->sc_rx.buf_map[index].mbuf = NULL;

		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);

			m->m_len = m->m_pkthdr.len = len - ETHER_ALIGN;
			m->m_nextpkt = NULL;

			ml_enqueue(&ml, m);
		} else {
			ifp->if_ierrors++;
			m_freem(m);
		}

		if_rxr_put(&sc->sc_rx_ring, 1);

		index = RX_NEXT(index);
	}

	if (sc->sc_rx.pidx != pidx) {
		sc->sc_rx.next = index;
		sc->sc_rx.pidx = pidx;

		if (ifiq_input(&ifp->if_rcv, &ml))
			if_rxr_livelocked(&sc->sc_rx_ring);

		genet_fill_rx_ring(sc, qid);
	}
}

void
genet_txintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct genet_bufmap *bmap;
	uint32_t cidx, total;
	int i;

	cidx = RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)) & 0xffff;
	total = (cidx - sc->sc_tx.cidx) & 0xffff;

	for (i = sc->sc_tx.next; sc->sc_tx.queued > 0 && total > 0;
	    i = TX_NEXT(i), total--) {
		/* XXX check for errors */

		bmap = &sc->sc_tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx.buf_tag, bmap->map,
			    0, bmap->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}

		--sc->sc_tx.queued;
	}

	if (sc->sc_tx.queued == 0)
		ifp->if_timer = 0;

	if (sc->sc_tx.cidx != cidx) {
		sc->sc_tx.next = i;
		sc->sc_tx.cidx = cidx;

		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}
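
/*
 * Transmit start uses the three-step ifq dequeue protocol: a packet is
 * peeked at with ifq_deq_begin(), rolled back (and the queue marked
 * oactive) if no descriptors are free so it is retried later, and only
 * committed once its segments have been handed to the hardware.
 */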

void
genet_start(struct ifnet *ifp)
{
	struct genet_softc *sc = ifp->if_softc;
	struct mbuf *m;
	const int qid = GENET_DMA_DEFAULT_QUEUE;
	int nsegs, index, cnt;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;

	index = sc->sc_tx.pidx & (TX_DESC_COUNT - 1);
	cnt = 0;

	for (;;) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		nsegs = genet_setup_txbuf(sc, index, m);
		if (nsegs == -1) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (nsegs == 0) {
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		ifq_deq_commit(&ifp->if_snd, m);
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);

		index = TX_SKIP(index, nsegs);

		sc->sc_tx.pidx = (sc->sc_tx.pidx + nsegs) & 0xffff;
		cnt++;
	}

	if (cnt != 0) {
		WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), sc->sc_tx.pidx);
		ifp->if_timer = 5;
	}
}

int
genet_intr(void *arg)
{
	struct genet_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t val;

	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		genet_rxintr(sc, GENET_DMA_DEFAULT_QUEUE);

	if (val & GENET_IRQ_TXDMA_DONE) {
		genet_txintr(sc, GENET_DMA_DEFAULT_QUEUE);
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}

	return 1;
}

int
genet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct genet_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				genet_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				genet_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			genet_setup_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}

int
genet_setup_dma(struct genet_softc *sc, int qid)
{
	int error, i;

	/* Setup TX ring */
	sc->sc_tx.buf_tag = sc->sc_dmat;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_tx.buf_tag, MCLBYTES,
		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx.buf_map[i].map);
		if (error != 0) {
			printf("%s: cannot create TX buffer map\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	/* Setup RX ring */
	sc->sc_rx.buf_tag = sc->sc_dmat;
	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_rx.buf_tag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx.buf_map[i].map);
		if (error != 0) {
			printf("%s: cannot create RX buffer map\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	return 0;
}
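
/*
 * The RGMII sub-modes differ only in where the clock delay is inserted
 * (PHY RX path, TX path, or both); they are translated here into the
 * corresponding MIIF_RXID/MIIF_TXID hints for the PHY driver.
 */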

int
genet_attach(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int mii_flags = 0;

	switch (sc->sc_phy_mode) {
	case GENET_PHY_MODE_RGMII_ID:
		mii_flags |= MIIF_RXID | MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII_RXID:
		mii_flags |= MIIF_RXID;
		break;
	case GENET_PHY_MODE_RGMII_TXID:
		mii_flags |= MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII:
	default:
		break;
	}

	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));

	/* Soft reset EMAC core */
	genet_reset(sc);

	/* Setup DMA descriptors */
	if (genet_setup_dma(sc, GENET_DMA_DEFAULT_QUEUE) != 0) {
		printf("%s: failed to setup DMA descriptors\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	timeout_set(&sc->sc_stat_ch, genet_tick, sc);
	timeout_set(&sc->sc_rxto, genet_rxtick, sc);

	/* Setup ethernet interface */
	ifp->if_softc = sc;
	snprintf(ifp->if_xname, IFNAMSIZ, "%s", sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = genet_start;
	ifp->if_ioctl = genet_ioctl;
	ifq_init_maxlen(&ifp->if_snd, IFQ_MAXLEN);

	/* 802.1Q VLAN-sized frames are supported */
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Attach MII driver */
	ifmedia_init(&mii->mii_media, 0, genet_media_change,
	    genet_media_status);
	mii->mii_ifp = ifp;
	mii->mii_readreg = genet_mii_readreg;
	mii->mii_writereg = genet_mii_writereg;
	mii->mii_statchg = genet_mii_statchg;
	mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&mii->mii_phys)) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	}
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach interface */
	if_attach(ifp);

	/* Attach ethernet interface */
	ether_ifattach(ifp);

	return 0;
}

void
genet_lladdr_read(struct genet_softc *sc, uint8_t *lladdr)
{
	uint32_t maclo, machi;

	maclo = RD4(sc, GENET_UMAC_MAC0);
	machi = RD4(sc, GENET_UMAC_MAC1);

	lladdr[0] = (maclo >> 24) & 0xff;
	lladdr[1] = (maclo >> 16) & 0xff;
	lladdr[2] = (maclo >> 8) & 0xff;
	lladdr[3] = (maclo >> 0) & 0xff;
	lladdr[4] = (machi >> 8) & 0xff;
	lladdr[5] = (machi >> 0) & 0xff;
}