/*	$OpenBSD: dwqe.c,v 1.16 2023/12/28 14:30:28 uwe Exp $	*/
/*
 * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
 * Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Synopsys Designware ethernet controller.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/ic/dwqevar.h>
#include <dev/ic/dwqereg.h>

struct cfdriver dwqe_cd = {
	NULL, "dwqe", DV_IFNET
};

uint32_t dwqe_read(struct dwqe_softc *, bus_addr_t);
void	dwqe_write(struct dwqe_softc *, bus_addr_t, uint32_t);

int	dwqe_ioctl(struct ifnet *, u_long, caddr_t);
void	dwqe_start(struct ifqueue *);
void	dwqe_watchdog(struct ifnet *);

int	dwqe_media_change(struct ifnet *);
void	dwqe_media_status(struct ifnet *, struct ifmediareq *);

void	dwqe_mii_attach(struct dwqe_softc *);
int	dwqe_mii_readreg(struct device *, int, int);
void	dwqe_mii_writereg(struct device *, int, int, int);
void	dwqe_mii_statchg(struct device *);

void	dwqe_lladdr_read(struct dwqe_softc *, uint8_t *);
void	dwqe_lladdr_write(struct dwqe_softc *);

void	dwqe_tick(void *);
void	dwqe_rxtick(void *);

int	dwqe_intr(void *);
void	dwqe_tx_proc(struct dwqe_softc *);
void	dwqe_rx_proc(struct dwqe_softc *);

void	dwqe_up(struct dwqe_softc *);
void	dwqe_down(struct dwqe_softc *);
void	dwqe_iff(struct dwqe_softc *);
int	dwqe_encap(struct dwqe_softc *, struct mbuf *, int *, int *);

void	dwqe_reset(struct dwqe_softc *);

struct dwqe_dmamem *
	dwqe_dmamem_alloc(struct dwqe_softc *, bus_size_t, bus_size_t);
void	dwqe_dmamem_free(struct dwqe_softc *, struct dwqe_dmamem *);
struct mbuf *dwqe_alloc_mbuf(struct dwqe_softc *, bus_dmamap_t);
void	dwqe_fill_rx_ring(struct dwqe_softc *);

int
dwqe_attach(struct dwqe_softc *sc)
{
	struct ifnet *ifp;
	uint32_t version, mode;
	int i;

	version = dwqe_read(sc, GMAC_VERSION);
	printf(": rev 0x%02x, address %s\n", version & GMAC_VERSION_SNPS_MASK,
	    ether_sprintf(sc->sc_lladdr));

	for (i = 0; i < 4; i++)
		sc->sc_hw_feature[i] = dwqe_read(sc, GMAC_MAC_HW_FEATURE(i));

	timeout_set(&sc->sc_phy_tick, dwqe_tick, sc);
	timeout_set(&sc->sc_rxto, dwqe_rxtick, sc);
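
	/* Initialize the network interface. */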
	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = dwqe_ioctl;
	ifp->if_qstart = dwqe_start;
	ifp->if_watchdog = dwqe_watchdog;
	ifq_init_maxlen(&ifp->if_snd, DWQE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwqe_mii_readreg;
	sc->sc_mii.mii_writereg = dwqe_mii_writereg;
	sc->sc_mii.mii_statchg = dwqe_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwqe_media_change, dwqe_media_status);

	dwqe_reset(sc);

	/* Configure DMA engine. */
	mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);
	if (sc->sc_fixed_burst)
		mode |= GMAC_SYS_BUS_MODE_FB;
	if (sc->sc_mixed_burst)
		mode |= GMAC_SYS_BUS_MODE_MB;
	if (sc->sc_aal)
		mode |= GMAC_SYS_BUS_MODE_AAL;
	dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);

	/* Configure channel 0. */
	mode = dwqe_read(sc, GMAC_CHAN_CONTROL(0));
	if (sc->sc_8xpbl)
		mode |= GMAC_CHAN_CONTROL_8XPBL;
	dwqe_write(sc, GMAC_CHAN_CONTROL(0), mode);

	mode = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	mode &= ~GMAC_CHAN_TX_CONTROL_PBL_MASK;
	mode |= sc->sc_txpbl << GMAC_CHAN_TX_CONTROL_PBL_SHIFT;
	mode |= GMAC_CHAN_TX_CONTROL_OSP;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), mode);
	mode = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	mode &= ~GMAC_CHAN_RX_CONTROL_RPBL_MASK;
	mode |= sc->sc_rxpbl << GMAC_CHAN_RX_CONTROL_RPBL_SHIFT;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), mode);

	/* Configure AXI master. */
	if (sc->sc_axi_config) {
		int i;

		mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);

		mode &= ~GMAC_SYS_BUS_MODE_EN_LPI;
		if (sc->sc_lpi_en)
			mode |= GMAC_SYS_BUS_MODE_EN_LPI;
		mode &= ~GMAC_SYS_BUS_MODE_LPI_XIT_FRM;
		if (sc->sc_xit_frm)
			mode |= GMAC_SYS_BUS_MODE_LPI_XIT_FRM;

		mode &= ~GMAC_SYS_BUS_MODE_WR_OSR_LMT_MASK;
		mode |= (sc->sc_wr_osr_lmt << GMAC_SYS_BUS_MODE_WR_OSR_LMT_SHIFT);
		mode &= ~GMAC_SYS_BUS_MODE_RD_OSR_LMT_MASK;
		mode |= (sc->sc_rd_osr_lmt << GMAC_SYS_BUS_MODE_RD_OSR_LMT_SHIFT);

		for (i = 0; i < nitems(sc->sc_blen); i++) {
			switch (sc->sc_blen[i]) {
			case 256:
				mode |= GMAC_SYS_BUS_MODE_BLEN_256;
				break;
			case 128:
				mode |= GMAC_SYS_BUS_MODE_BLEN_128;
				break;
			case 64:
				mode |= GMAC_SYS_BUS_MODE_BLEN_64;
				break;
			case 32:
				mode |= GMAC_SYS_BUS_MODE_BLEN_32;
				break;
			case 16:
				mode |= GMAC_SYS_BUS_MODE_BLEN_16;
				break;
			case 8:
				mode |= GMAC_SYS_BUS_MODE_BLEN_8;
				break;
			case 4:
				mode |= GMAC_SYS_BUS_MODE_BLEN_4;
				break;
			}
		}

		dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);
	}

	if (!sc->sc_fixed_link)
		dwqe_mii_attach(sc);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Disable interrupts. */
	dwqe_write(sc, GMAC_INT_EN, 0);
	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);

	return 0;
}

void
dwqe_mii_attach(struct dwqe_softc *sc)
{
	int mii_flags = 0;

	switch (sc->sc_phy_mode) {
	case DWQE_PHY_MODE_RGMII:
		mii_flags |= MIIF_SETDELAY;
		break;
	case DWQE_PHY_MODE_RGMII_ID:
		mii_flags |= MIIF_SETDELAY | MIIF_RXID | MIIF_TXID;
		break;
	case DWQE_PHY_MODE_RGMII_RXID:
		mii_flags |= MIIF_SETDELAY | MIIF_RXID;
		break;
	case DWQE_PHY_MODE_RGMII_TXID:
		mii_flags |= MIIF_SETDELAY | MIIF_TXID;
		break;
	default:
		break;
	}

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
	    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, mii_flags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
}

uint32_t
dwqe_read(struct dwqe_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
}

void
dwqe_write(struct dwqe_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
}

void
dwqe_lladdr_read(struct dwqe_softc *sc, uint8_t *lladdr)
{
	uint32_t machi, maclo;

	machi = dwqe_read(sc, GMAC_MAC_ADDR0_HI);
	maclo = dwqe_read(sc, GMAC_MAC_ADDR0_LO);

	if (machi || maclo) {
		lladdr[0] = (maclo >> 0) & 0xff;
		lladdr[1] = (maclo >> 8) & 0xff;
		lladdr[2] = (maclo >> 16) & 0xff;
		lladdr[3] = (maclo >> 24) & 0xff;
		lladdr[4] = (machi >> 0) & 0xff;
		lladdr[5] = (machi >> 8) & 0xff;
	} else {
		ether_fakeaddr(&sc->sc_ac.ac_if);
	}
}

void
dwqe_lladdr_write(struct dwqe_softc *sc)
{
	dwqe_write(sc, GMAC_MAC_ADDR0_HI,
	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
	dwqe_write(sc, GMAC_MAC_ADDR0_LO,
	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
}

void
dwqe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct dwqe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx, left, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	idx = sc->sc_tx_prod;
	left = sc->sc_tx_cons;
	if (left <= idx)
		left += DWQE_NTXDESC;
	left -= idx;
	used = 0;

	for (;;) {
		if (used + DWQE_NTXSEGS + 1 > left) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		error = dwqe_encap(sc, m, &idx, &used);
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (used > 0) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/*
		 * Start the transmit process after the last in-use Tx
		 * descriptor's OWN bit has been updated.
		 */
350 */ 351 dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring) + 352 idx * sizeof(struct dwqe_desc)); 353 } 354 } 355 356 int 357 dwqe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr) 358 { 359 struct dwqe_softc *sc = ifp->if_softc; 360 struct ifreq *ifr = (struct ifreq *)addr; 361 int error = 0, s; 362 363 s = splnet(); 364 365 switch (cmd) { 366 case SIOCSIFADDR: 367 ifp->if_flags |= IFF_UP; 368 /* FALLTHROUGH */ 369 case SIOCSIFFLAGS: 370 if (ifp->if_flags & IFF_UP) { 371 if (ifp->if_flags & IFF_RUNNING) 372 error = ENETRESET; 373 else 374 dwqe_up(sc); 375 } else { 376 if (ifp->if_flags & IFF_RUNNING) 377 dwqe_down(sc); 378 } 379 break; 380 381 case SIOCGIFMEDIA: 382 case SIOCSIFMEDIA: 383 if (sc->sc_fixed_link) 384 error = ENOTTY; 385 else 386 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 387 break; 388 389 case SIOCGIFRXR: 390 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data, 391 NULL, MCLBYTES, &sc->sc_rx_ring); 392 break; 393 394 default: 395 error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr); 396 break; 397 } 398 399 if (error == ENETRESET) { 400 if (ifp->if_flags & IFF_RUNNING) 401 dwqe_iff(sc); 402 error = 0; 403 } 404 405 splx(s); 406 return (error); 407 } 408 409 void 410 dwqe_watchdog(struct ifnet *ifp) 411 { 412 printf("%s\n", __func__); 413 } 414 415 int 416 dwqe_media_change(struct ifnet *ifp) 417 { 418 struct dwqe_softc *sc = ifp->if_softc; 419 420 if (LIST_FIRST(&sc->sc_mii.mii_phys)) 421 mii_mediachg(&sc->sc_mii); 422 423 return (0); 424 } 425 426 void 427 dwqe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 428 { 429 struct dwqe_softc *sc = ifp->if_softc; 430 431 if (LIST_FIRST(&sc->sc_mii.mii_phys)) { 432 mii_pollstat(&sc->sc_mii); 433 ifmr->ifm_active = sc->sc_mii.mii_media_active; 434 ifmr->ifm_status = sc->sc_mii.mii_media_status; 435 } 436 } 437 438 int 439 dwqe_mii_readreg(struct device *self, int phy, int reg) 440 { 441 struct dwqe_softc *sc = (void *)self; 442 int n; 443 444 dwqe_write(sc, GMAC_MAC_MDIO_ADDR, 445 (sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT) | 446 (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) | 447 (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) | 448 GMAC_MAC_MDIO_ADDR_GOC_READ | 449 GMAC_MAC_MDIO_ADDR_GB); 450 451 for (n = 0; n < 2000; n++) { 452 delay(10); 453 if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) & GMAC_MAC_MDIO_ADDR_GB) == 0) 454 return dwqe_read(sc, GMAC_MAC_MDIO_DATA); 455 } 456 457 printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname); 458 return (0); 459 } 460 461 void 462 dwqe_mii_writereg(struct device *self, int phy, int reg, int val) 463 { 464 struct dwqe_softc *sc = (void *)self; 465 int n; 466 467 dwqe_write(sc, GMAC_MAC_MDIO_DATA, val); 468 dwqe_write(sc, GMAC_MAC_MDIO_ADDR, 469 (sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT) | 470 (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) | 471 (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) | 472 GMAC_MAC_MDIO_ADDR_GOC_WRITE | 473 GMAC_MAC_MDIO_ADDR_GB); 474 475 for (n = 0; n < 2000; n++) { 476 delay(10); 477 if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) & GMAC_MAC_MDIO_ADDR_GB) == 0) 478 return; 479 } 480 481 printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname); 482 } 483 484 void 485 dwqe_mii_statchg(struct device *self) 486 { 487 struct dwqe_softc *sc = (void *)self; 488 struct ifnet *ifp = &sc->sc_ac.ac_if; 489 uint32_t conf; 490 491 conf = dwqe_read(sc, GMAC_MAC_CONF); 492 conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES); 493 494 switch (ifp->if_baudrate) { 495 case IF_Mbps(1000): 496 sc->sc_link = 1; 497 break; 498 case IF_Mbps(100): 499 conf |= GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES; 500 
		sc->sc_link = 1;
		break;
	case IF_Mbps(10):
		conf |= GMAC_MAC_CONF_PS;
		sc->sc_link = 1;
		break;
	default:
		sc->sc_link = 0;
		return;
	}

	if (sc->sc_link == 0)
		return;

	conf &= ~GMAC_MAC_CONF_DM;
	if (ifp->if_link_state == LINK_STATE_FULL_DUPLEX)
		conf |= GMAC_MAC_CONF_DM;

	dwqe_write(sc, GMAC_MAC_CONF, conf);
}

void
dwqe_tick(void *arg)
{
	struct dwqe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_phy_tick, 1);
}

void
dwqe_rxtick(void *arg)
{
	struct dwqe_softc *sc = arg;
	int s;

	s = splnet();

	/* TODO: disable RXQ? */
	printf("%s:%d\n", __func__, __LINE__);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), 0);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), 0);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_rxring) >> 32);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));

	/* TODO: re-enable RXQ? */

	splx(s);
}

int
dwqe_intr(void *arg)
{
	struct dwqe_softc *sc = arg;
	uint32_t reg;

	reg = dwqe_read(sc, GMAC_INT_STATUS);
	dwqe_write(sc, GMAC_INT_STATUS, reg);

	reg = dwqe_read(sc, GMAC_CHAN_STATUS(0));
	dwqe_write(sc, GMAC_CHAN_STATUS(0), reg);

	if (reg & GMAC_CHAN_STATUS_RI)
		dwqe_rx_proc(sc);

	if (reg & GMAC_CHAN_STATUS_TI)
		dwqe_tx_proc(sc);

	return (1);
}

void
dwqe_tx_proc(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_desc *txd;
	struct dwqe_buf *txb;
	int idx, txfree;

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring), 0,
	    DWQE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cons != sc->sc_tx_prod) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWQE_NTXDESC);

		txd = &sc->sc_txdesc[idx];
		if (txd->sd_tdes3 & TDES3_OWN)
			break;

		if (txd->sd_tdes3 & TDES3_ES)
			ifp->if_oerrors++;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;

		if (sc->sc_tx_cons == (DWQE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_tdes3 = 0;
	}

	if (sc->sc_tx_cons == sc->sc_tx_prod)
		ifp->if_timer = 0;

	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}

void
dwqe_rx_proc(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_desc *rxd;
	struct dwqe_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len, cnt, put;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
	    DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

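	/*
	 * Pull completed packets off the Rx ring, stopping at the
	 * first descriptor the hardware still owns.
	 */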
	cnt = if_rxr_inuse(&sc->sc_rx_ring);
	put = 0;
	while (put < cnt) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWQE_NRXDESC);

		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_tdes3 & RDES3_OWN)
			break;

		len = rxd->sd_tdes3 & RDES3_LENGTH;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		m = rxb->tb_m;
		rxb->tb_m = NULL;

		if (rxd->sd_tdes3 & RDES3_ES) {
			ifp->if_ierrors++;
			m_freem(m);
		} else {
			/* Strip off CRC. */
			len -= ETHER_CRC_LEN;
			KASSERT(len > 0);

			m->m_pkthdr.len = m->m_len = len;

			ml_enqueue(&ml, m);
		}

		put++;
		if (sc->sc_rx_cons == (DWQE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	if_rxr_put(&sc->sc_rx_ring, put);
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
	    DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
dwqe_up(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_buf *txb, *rxb;
	uint32_t mode, reg, fifosz, tqs, rqs;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwqe_dmamem_alloc(sc,
	    DWQE_NTXDESC * sizeof(struct dwqe_desc), 8);
	sc->sc_txdesc = DWQE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWQE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWQE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
	    0, DWQE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_txring) >> 32);
	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));
	dwqe_write(sc, GMAC_CHAN_TX_RING_LEN(0), DWQE_NTXDESC - 1);
	dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = dwqe_dmamem_alloc(sc,
	    DWQE_NRXDESC * sizeof(struct dwqe_desc), 8);
	sc->sc_rxdesc = DWQE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWQE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	if_rxr_init(&sc->sc_rx_ring, 2, DWQE_NRXDESC);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_rxring) >> 32);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));
	dwqe_write(sc, GMAC_CHAN_RX_RING_LEN(0), DWQE_NRXDESC - 1);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwqe_lladdr_write(sc);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwqe_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	dwqe_write(sc, GMAC_MAC_1US_TIC_CTR, (sc->sc_clkrate / 1000000) - 1);

	/* Start receive DMA */
	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	reg |= GMAC_CHAN_RX_CONTROL_SR;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);

	/* Start transmit DMA */
	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	reg |= GMAC_CHAN_TX_CONTROL_ST;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);

	mode = dwqe_read(sc, GMAC_MTL_CHAN_RX_OP_MODE(0));
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RSF;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RTC_MASK;
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RTC_128;
	} else {
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RSF;
	}
	mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RQS_MASK;
	if (sc->sc_rxfifo_size)
		fifosz = sc->sc_rxfifo_size;
	else
		fifosz = (128 <<
		    GMAC_MAC_HW_FEATURE1_RXFIFOSIZE(sc->sc_hw_feature[1]));
	rqs = fifosz / 256 - 1;
	mode |= (rqs << GMAC_MTL_CHAN_RX_OP_MODE_RQS_SHIFT) &
	    GMAC_MTL_CHAN_RX_OP_MODE_RQS_MASK;
	if (fifosz >= 4096) {
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_EHFC;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RFD_MASK;
		mode |= 0x3 << GMAC_MTL_CHAN_RX_OP_MODE_RFD_SHIFT;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RFA_MASK;
		mode |= 0x1 << GMAC_MTL_CHAN_RX_OP_MODE_RFA_SHIFT;
	}
	dwqe_write(sc, GMAC_MTL_CHAN_RX_OP_MODE(0), mode);

	mode = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TSF;
		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TTC_MASK;
		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TTC_512;
	} else {
		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TSF;
	}
	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TXQEN_MASK;
	mode |= GMAC_MTL_CHAN_TX_OP_MODE_TXQEN;
	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TQS_MASK;
	if (sc->sc_txfifo_size)
		fifosz = sc->sc_txfifo_size;
	else
		fifosz = (128 <<
		    GMAC_MAC_HW_FEATURE1_TXFIFOSIZE(sc->sc_hw_feature[1]));
	tqs = (fifosz / 256) - 1;
	mode |= (tqs << GMAC_MTL_CHAN_TX_OP_MODE_TQS_SHIFT) &
	    GMAC_MTL_CHAN_TX_OP_MODE_TQS_MASK;
	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), mode);

	reg = dwqe_read(sc, GMAC_QX_TX_FLOW_CTRL(0));
	reg |= 0xffffU << GMAC_QX_TX_FLOW_CTRL_PT_SHIFT;
	reg |= GMAC_QX_TX_FLOW_CTRL_TFE;
	dwqe_write(sc, GMAC_QX_TX_FLOW_CTRL(0), reg);
	reg = dwqe_read(sc, GMAC_RX_FLOW_CTRL);
	reg |= GMAC_RX_FLOW_CTRL_RFE;
	dwqe_write(sc, GMAC_RX_FLOW_CTRL, reg);

	dwqe_write(sc, GMAC_RXQ_CTRL0, GMAC_RXQ_CTRL0_DCB_QUEUE_EN(0));

	dwqe_write(sc, GMAC_MAC_CONF, dwqe_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_BE | GMAC_MAC_CONF_JD | GMAC_MAC_CONF_JE |
	    GMAC_MAC_CONF_DCRS | GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);

	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0),
	    GMAC_CHAN_INTR_ENA_NIE |
	    GMAC_CHAN_INTR_ENA_AIE |
	    GMAC_CHAN_INTR_ENA_FBE |
	    GMAC_CHAN_INTR_ENA_RIE |
	    GMAC_CHAN_INTR_ENA_TIE);

	if (!sc->sc_fixed_link)
		timeout_add_sec(&sc->sc_phy_tick, 1);
}

void
dwqe_down(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_buf *txb, *rxb;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_rxto);
	if (!sc->sc_fixed_link)
		timeout_del(&sc->sc_phy_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Disable receiver */
	reg = dwqe_read(sc, GMAC_MAC_CONF);
	reg &= ~GMAC_MAC_CONF_RE;
	dwqe_write(sc, GMAC_MAC_CONF, reg);

	/* Stop receive DMA */
	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	reg &= ~GMAC_CHAN_RX_CONTROL_SR;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);

	/* Stop transmit DMA */
	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	reg &= ~GMAC_CHAN_TX_CONTROL_ST;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);

	/* Flush data in the TX FIFO */
	reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
	reg |= GMAC_MTL_CHAN_TX_OP_MODE_FTQ;
	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), reg);
	/* Wait for flush to complete */
	for (i = 10000; i > 0; i--) {
		reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
		if ((reg & GMAC_MTL_CHAN_TX_OP_MODE_FTQ) == 0)
			break;
		delay(1);
	}
	if (i == 0) {
		printf("%s: timeout flushing TX queue\n",
		    sc->sc_dev.dv_xname);
	}

	/* Disable transmitter */
	reg = dwqe_read(sc, GMAC_MAC_CONF);
	reg &= ~GMAC_MAC_CONF_TE;
	dwqe_write(sc, GMAC_MAC_CONF, reg);

	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	for (i = 0; i < DWQE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwqe_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < DWQE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwqe_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

void
dwqe_iff(struct dwqe_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg;

	reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= GMAC_MAC_PACKET_FILTER_PM;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= GMAC_MAC_PACKET_FILTER_PR |
			    GMAC_MAC_PACKET_FILTER_PCF_ALL;
	} else {
		reg |= GMAC_MAC_PACKET_FILTER_HMC;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwqe_lladdr_write(sc);

	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG0, hash[0]);
	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG1, hash[1]);

	dwqe_write(sc, GMAC_MAC_PACKET_FILTER, reg);
}

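/*
 * Load an mbuf chain into Tx descriptors starting at *idx.  Returns
 * EFBIG if the packet does not fit even after defragmentation.
 */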
int
dwqe_encap(struct dwqe_softc *sc, struct mbuf *m, int *idx, int *used)
{
	struct dwqe_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		/* TODO: check for 32-bit vs 64-bit support */
		KASSERT((map->dm_segs[i].ds_addr >> 32) == 0);

		txd->sd_tdes0 = (uint32_t)map->dm_segs[i].ds_addr;
		txd->sd_tdes1 = (uint32_t)(map->dm_segs[i].ds_addr >> 32);
		txd->sd_tdes2 = map->dm_segs[i].ds_len;
		txd->sd_tdes3 = m->m_pkthdr.len;
		if (i == 0)
			txd->sd_tdes3 |= TDES3_FS;
		if (i == (map->dm_nsegs - 1)) {
			txd->sd_tdes2 |= TDES2_IC;
			txd->sd_tdes3 |= TDES3_LS;
		}
		if (i != 0)
			txd->sd_tdes3 |= TDES3_OWN;

		bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (DWQE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

	txd_start->sd_tdes3 |= TDES3_OWN;
	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	*idx = frag;
	*used += map->dm_nsegs;

	return (0);
}

void
dwqe_reset(struct dwqe_softc *sc)
{
	int n;

	dwqe_write(sc, GMAC_BUS_MODE, dwqe_read(sc, GMAC_BUS_MODE) |
	    GMAC_BUS_MODE_SWR);

	for (n = 0; n < 30000; n++) {
		if ((dwqe_read(sc, GMAC_BUS_MODE) &
		    GMAC_BUS_MODE_SWR) == 0)
			return;
		delay(10);
	}

	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
}

struct dwqe_dmamem *
dwqe_dmamem_alloc(struct dwqe_softc *sc, bus_size_t size, bus_size_t align)
{
	struct dwqe_dmamem *tdm;
	int nsegs;

	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(tdm->tdm_kva, size);

	return (tdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF, 0);

	return (NULL);
}

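/* Release a DMA memory region allocated with dwqe_dmamem_alloc(). */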
void
dwqe_dmamem_free(struct dwqe_softc *sc, struct dwqe_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}

struct mbuf *
dwqe_alloc_mbuf(struct dwqe_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}

void
dwqe_fill_rx_ring(struct dwqe_softc *sc)
{
	struct dwqe_desc *rxd;
	struct dwqe_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, DWQE_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = dwqe_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		/* TODO: check for 32-bit vs 64-bit support */
		KASSERT((rxb->tb_map->dm_segs[0].ds_addr >> 32) == 0);

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		rxd->sd_tdes0 = (uint32_t)rxb->tb_map->dm_segs[0].ds_addr;
		rxd->sd_tdes1 = (uint32_t)(rxb->tb_map->dm_segs[0].ds_addr >> 32);
		rxd->sd_tdes2 = 0;
		rxd->sd_tdes3 = RDES3_OWN | RDES3_IC | RDES3_BUF1V;

		if (sc->sc_rx_prod == (DWQE_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	dwqe_write(sc, GMAC_CHAN_RX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring) +
	    sc->sc_rx_prod * sizeof(*rxd));

	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}