/*	$OpenBSD: if_dwge.c,v 1.21 2023/11/20 20:41:18 kettenis Exp $	*/
/*
 * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Synopsys Designware ethernet controller.
 */

#include "bpfilter.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <net/if.h>
#include <net/if_media.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_pinctrl.h>
#include <dev/ofw/ofw_regulator.h>
#include <dev/ofw/fdt.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

/* Registers */

#define GMAC_MAC_CONF		0x0000
#define GMAC_MAC_CONF_JD	(1 << 22)
#define GMAC_MAC_CONF_BE	(1 << 21)
#define GMAC_MAC_CONF_DCRS	(1 << 16)
#define GMAC_MAC_CONF_PS	(1 << 15)
#define GMAC_MAC_CONF_FES	(1 << 14)
#define GMAC_MAC_CONF_LM	(1 << 12)
#define GMAC_MAC_CONF_DM	(1 << 11)
#define GMAC_MAC_CONF_TE	(1 << 3)
#define GMAC_MAC_CONF_RE	(1 << 2)
#define GMAC_MAC_FRM_FILT	0x0004
#define GMAC_MAC_FRM_FILT_PM	(1 << 4)
#define GMAC_MAC_FRM_FILT_HMC	(1 << 2)
#define GMAC_MAC_FRM_FILT_PR	(1 << 0)
#define GMAC_HASH_TAB_HI	0x0008
#define GMAC_HASH_TAB_LO	0x000c
#define GMAC_GMII_ADDR		0x0010
#define GMAC_GMII_ADDR_PA_SHIFT	11
#define GMAC_GMII_ADDR_GR_SHIFT	6
#define GMAC_GMII_ADDR_CR_SHIFT	2
#define GMAC_GMII_ADDR_CR_MASK	0xf
#define GMAC_GMII_ADDR_CR_DIV_42	0
#define GMAC_GMII_ADDR_CR_DIV_62	1
#define GMAC_GMII_ADDR_CR_DIV_16	2
#define GMAC_GMII_ADDR_CR_DIV_26	3
#define GMAC_GMII_ADDR_CR_DIV_102	4
#define GMAC_GMII_ADDR_CR_DIV_124	5
#define GMAC_GMII_ADDR_GW	(1 << 1)
#define GMAC_GMII_ADDR_GB	(1 << 0)
#define GMAC_GMII_DATA		0x0014
#define GMAC_VERSION		0x0020
#define GMAC_VERSION_SNPS_MASK	0xff
#define GMAC_INT_MASK		0x003c
#define GMAC_INT_MASK_LPIIM	(1 << 10)
#define GMAC_INT_MASK_PIM	(1 << 3)
#define GMAC_INT_MASK_RIM	(1 << 0)
#define GMAC_MAC_ADDR0_HI	0x0040
#define GMAC_MAC_ADDR0_LO	0x0044
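/*
 * MMC (MAC management counter) registers.  The counters below are
 * harvested into kstat by dwge_kstat_read().
 */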
#define GMAC_MAC_MMC_CTRL	0x0100
#define GMAC_MAC_MMC_CTRL_ROR	(1 << 2)
#define GMAC_MAC_MMC_CTRL_CR	(1 << 0)
#define GMAC_MMC_RX_INT_MSK	0x010c
#define GMAC_MMC_TX_INT_MSK	0x0110
#define GMAC_MMC_TXOCTETCNT_GB	0x0114
#define GMAC_MMC_TXFRMCNT_GB	0x0118
#define GMAC_MMC_TXUNDFLWERR	0x0148
#define GMAC_MMC_TXCARERR	0x0160
#define GMAC_MMC_TXOCTETCNT_G	0x0164
#define GMAC_MMC_TXFRMCNT_G	0x0168
#define GMAC_MMC_RXFRMCNT_GB	0x0180
#define GMAC_MMC_RXOCTETCNT_GB	0x0184
#define GMAC_MMC_RXOCTETCNT_G	0x0188
#define GMAC_MMC_RXMCFRMCNT_G	0x0190
#define GMAC_MMC_RXCRCERR	0x0194
#define GMAC_MMC_RXLENERR	0x01c8
#define GMAC_MMC_RXFIFOOVRFLW	0x01d4
#define GMAC_MMC_IPC_INT_MSK	0x0200
#define GMAC_BUS_MODE		0x1000
#define GMAC_BUS_MODE_8XPBL	(1 << 24)
#define GMAC_BUS_MODE_USP	(1 << 23)
#define GMAC_BUS_MODE_RPBL_MASK	(0x3f << 17)
#define GMAC_BUS_MODE_RPBL_SHIFT	17
#define GMAC_BUS_MODE_FB	(1 << 16)
#define GMAC_BUS_MODE_PBL_MASK	(0x3f << 8)
#define GMAC_BUS_MODE_PBL_SHIFT	8
#define GMAC_BUS_MODE_SWR	(1 << 0)
#define GMAC_TX_POLL_DEMAND	0x1004
#define GMAC_RX_DESC_LIST_ADDR	0x100c
#define GMAC_TX_DESC_LIST_ADDR	0x1010
#define GMAC_STATUS		0x1014
#define GMAC_STATUS_MMC		(1 << 27)
#define GMAC_STATUS_RI		(1 << 6)
#define GMAC_STATUS_TU		(1 << 2)
#define GMAC_STATUS_TI		(1 << 0)
#define GMAC_OP_MODE		0x1018
#define GMAC_OP_MODE_RSF	(1 << 25)
#define GMAC_OP_MODE_TSF	(1 << 21)
#define GMAC_OP_MODE_FTF	(1 << 20)
#define GMAC_OP_MODE_TTC_MASK	(0x7 << 14)
#define GMAC_OP_MODE_TTC_64	(0x0 << 14)
#define GMAC_OP_MODE_TTC_128	(0x1 << 14)
#define GMAC_OP_MODE_ST		(1 << 13)
#define GMAC_OP_MODE_RTC_MASK	(0x3 << 3)
#define GMAC_OP_MODE_RTC_64	(0x0 << 3)
#define GMAC_OP_MODE_RTC_128	(0x3 << 3)
#define GMAC_OP_MODE_OSF	(1 << 2)
#define GMAC_OP_MODE_SR		(1 << 1)
#define GMAC_INT_ENA		0x101c
#define GMAC_INT_ENA_NIE	(1 << 16)
#define GMAC_INT_ENA_RIE	(1 << 6)
#define GMAC_INT_ENA_TUE	(1 << 2)
#define GMAC_INT_ENA_TIE	(1 << 0)
#define GMAC_AXI_BUS_MODE	0x1028
#define GMAC_AXI_BUS_MODE_WR_OSR_LMT_MASK	(0xf << 20)
#define GMAC_AXI_BUS_MODE_WR_OSR_LMT_SHIFT	20
#define GMAC_AXI_BUS_MODE_RD_OSR_LMT_MASK	(0xf << 16)
#define GMAC_AXI_BUS_MODE_RD_OSR_LMT_SHIFT	16
#define GMAC_AXI_BUS_MODE_BLEN_256	(1 << 7)
#define GMAC_AXI_BUS_MODE_BLEN_128	(1 << 6)
#define GMAC_AXI_BUS_MODE_BLEN_64	(1 << 5)
#define GMAC_AXI_BUS_MODE_BLEN_32	(1 << 4)
#define GMAC_AXI_BUS_MODE_BLEN_16	(1 << 3)
#define GMAC_AXI_BUS_MODE_BLEN_8	(1 << 2)
#define GMAC_AXI_BUS_MODE_BLEN_4	(1 << 1)
#define GMAC_HW_FEATURE		0x1058
#define GMAC_HW_FEATURE_ENHDESSEL	(1 << 24)

/*
 * DWGE descriptors.
 */

struct dwge_desc {
	uint32_t sd_status;
	uint32_t sd_len;
	uint32_t sd_addr;
	uint32_t sd_next;
};

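/*
 * Descriptor status/control bits.  Setting the OWN bit in TDES0/RDES0
 * hands a descriptor to the DMA engine; the driver only touches a
 * descriptor again once the hardware has cleared the bit.
 */
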
/* Tx status bits. */
#define TDES0_DB	(1 << 0)
#define TDES0_UF	(1 << 1)
#define TDES0_ED	(1 << 2)
#define TDES0_CC_MASK	(0xf << 3)
#define TDES0_CC_SHIFT	3
#define TDES0_EC	(1 << 8)
#define TDES0_LC	(1 << 9)
#define TDES0_NC	(1 << 10)
#define TDES0_PCE	(1 << 12)
#define TDES0_JT	(1 << 14)
#define TDES0_IHE	(1 << 16)
#define TDES0_OWN	(1U << 31)

#define ETDES0_TCH	(1 << 20)
#define ETDES0_FS	(1 << 28)
#define ETDES0_LS	(1 << 29)
#define ETDES0_IC	(1 << 30)

/* Rx status bits */
#define RDES0_PE	(1 << 0)
#define RDES0_CE	(1 << 1)
#define RDES0_RE	(1 << 3)
#define RDES0_RWT	(1 << 4)
#define RDES0_FT	(1 << 5)
#define RDES0_LC	(1 << 6)
#define RDES0_IPC	(1 << 7)
#define RDES0_LS	(1 << 8)
#define RDES0_FS	(1 << 9)
#define RDES0_OE	(1 << 11)
#define RDES0_SAF	(1 << 13)
#define RDES0_DE	(1 << 14)
#define RDES0_ES	(1 << 15)
#define RDES0_FL_MASK	0x3fff
#define RDES0_FL_SHIFT	16
#define RDES0_AFM	(1 << 30)
#define RDES0_OWN	(1U << 31)

/* Tx size bits */
#define TDES1_TBS1	(0xfff << 0)
#define TDES1_TCH	(1 << 24)
#define TDES1_DC	(1 << 26)
#define TDES1_CIC_MASK	(0x3 << 27)
#define TDES1_CIC_IP	(1 << 27)
#define TDES1_CIC_NO_PSE	(2 << 27)
#define TDES1_CIC_FULL	(3 << 27)
#define TDES1_FS	(1 << 29)
#define TDES1_LS	(1 << 30)
#define TDES1_IC	(1U << 31)

/* Rx size bits */
#define RDES1_RBS1	(0xfff << 0)
#define RDES1_RCH	(1 << 24)
#define RDES1_DIC	(1U << 31)

#define ERDES1_RCH	(1 << 14)

struct dwge_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};

#define DWGE_NTXDESC	512
#define DWGE_NTXSEGS	16

#define DWGE_NRXDESC	512

struct dwge_dmamem {
	bus_dmamap_t		tdm_map;
	bus_dma_segment_t	tdm_seg;
	size_t			tdm_size;
	caddr_t			tdm_kva;
};
#define DWGE_DMA_MAP(_tdm)	((_tdm)->tdm_map)
#define DWGE_DMA_LEN(_tdm)	((_tdm)->tdm_size)
#define DWGE_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
#define DWGE_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)

struct dwge_softc {
	struct device		sc_dev;
	int			sc_node;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;
	void			*sc_ih;

	struct if_device	sc_ifd;

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	uint64_t		sc_fixed_media;
	int			sc_link;
	int			sc_phyloc;
	int			sc_force_thresh_dma_mode;
	int			sc_enh_desc;
	int			sc_defrag;

	struct dwge_dmamem	*sc_txring;
	struct dwge_buf		*sc_txbuf;
	struct dwge_desc	*sc_txdesc;
	int			sc_tx_prod;
	int			sc_tx_cons;

	struct dwge_dmamem	*sc_rxring;
	struct dwge_buf		*sc_rxbuf;
	struct dwge_desc	*sc_rxdesc;
	int			sc_rx_prod;
	struct if_rxring	sc_rx_ring;
	int			sc_rx_cons;

	struct timeout		sc_tick;
	struct timeout		sc_rxto;

	uint32_t		sc_clk;

	bus_size_t		sc_clk_sel;
	uint32_t		sc_clk_sel_125;
	uint32_t		sc_clk_sel_25;
	uint32_t		sc_clk_sel_2_5;

#if NKSTAT > 0
	struct mutex		sc_kstat_mtx;
	struct kstat		*sc_kstat;
#endif
};

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

int	dwge_match(struct device *, void *, void *);
void	dwge_attach(struct device *, struct device *, void *);
void	dwge_setup_allwinner(struct dwge_softc *);
void	dwge_setup_rockchip(struct dwge_softc *);

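/* autoconf(9) glue. */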
const struct cfattach dwge_ca = {
	sizeof(struct dwge_softc), dwge_match, dwge_attach
};

struct cfdriver dwge_cd = {
	NULL, "dwge", DV_IFNET
};

void	dwge_reset_phy(struct dwge_softc *);

uint32_t dwge_read(struct dwge_softc *, bus_addr_t);
void	dwge_write(struct dwge_softc *, bus_addr_t, uint32_t);

int	dwge_ioctl(struct ifnet *, u_long, caddr_t);
void	dwge_start(struct ifqueue *);
void	dwge_watchdog(struct ifnet *);

int	dwge_media_change(struct ifnet *);
void	dwge_media_status(struct ifnet *, struct ifmediareq *);

int	dwge_mii_readreg(struct device *, int, int);
void	dwge_mii_writereg(struct device *, int, int, int);
void	dwge_mii_statchg(struct device *);

void	dwge_lladdr_read(struct dwge_softc *, uint8_t *);
void	dwge_lladdr_write(struct dwge_softc *);

void	dwge_tick(void *);
void	dwge_rxtick(void *);

int	dwge_intr(void *);
void	dwge_tx_proc(struct dwge_softc *);
void	dwge_rx_proc(struct dwge_softc *);

void	dwge_up(struct dwge_softc *);
void	dwge_down(struct dwge_softc *);
void	dwge_iff(struct dwge_softc *);
int	dwge_encap(struct dwge_softc *, struct mbuf *, int *, int *);

void	dwge_reset(struct dwge_softc *);
void	dwge_stop_dma(struct dwge_softc *);

struct dwge_dmamem *
	dwge_dmamem_alloc(struct dwge_softc *, bus_size_t, bus_size_t);
void	dwge_dmamem_free(struct dwge_softc *, struct dwge_dmamem *);
struct mbuf *dwge_alloc_mbuf(struct dwge_softc *, bus_dmamap_t);
void	dwge_fill_rx_ring(struct dwge_softc *);

#if NKSTAT > 0
int	dwge_kstat_read(struct kstat *);
void	dwge_kstat_attach(struct dwge_softc *);
#endif

int
dwge_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "allwinner,sun7i-a20-gmac") ||
	    OF_is_compatible(faa->fa_node, "amlogic,meson-axg-dwmac") ||
	    OF_is_compatible(faa->fa_node, "amlogic,meson-g12a-dwmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3308-mac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac") ||
	    OF_is_compatible(faa->fa_node, "snps,dwmac"));
}

void
dwge_attach(struct device *parent, struct device *self, void *aux)
{
	struct dwge_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t phy, phy_supply;
	uint32_t axi_config;
	uint32_t mode, pbl;
	uint32_t version;
	uint32_t feature;
	int node;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}
	sc->sc_dmat = faa->fa_dmat;

	/* Lookup PHY. */
	phy = OF_getpropint(faa->fa_node, "phy", 0);
	if (phy == 0)
		phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
	node = OF_getnodebyphandle(phy);
	if (node)
		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
	else
		sc->sc_phyloc = MII_PHY_ANY;

	pinctrl_byname(faa->fa_node, "default");

	/* Enable clocks. */
	clock_set_assigned(faa->fa_node);
	clock_enable(faa->fa_node, "stmmaceth");
	reset_deassert(faa->fa_node, "stmmaceth");
	if (OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3308-mac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac")) {
		clock_enable(faa->fa_node, "mac_clk_rx");
		clock_enable(faa->fa_node, "mac_clk_tx");
		clock_enable(faa->fa_node, "aclk_mac");
		clock_enable(faa->fa_node, "pclk_mac");
	}
	delay(5000);

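	/*
	 * The low byte of the version register identifies the Synopsys
	 * core revision; for cores newer than 3.5 the hardware feature
	 * register tells us whether the enhanced descriptor layout is
	 * in use.
	 */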
	version = dwge_read(sc, GMAC_VERSION);
	printf(": rev 0x%02x", version & GMAC_VERSION_SNPS_MASK);

	if ((version & GMAC_VERSION_SNPS_MASK) > 0x35) {
		feature = dwge_read(sc, GMAC_HW_FEATURE);
		if (feature & GMAC_HW_FEATURE_ENHDESSEL)
			sc->sc_enh_desc = 1;
	}

	/*
	 * The GMAC on the StarFive JH7100 (core version 3.70)
	 * sometimes transmits corrupted packets. The exact
	 * conditions under which this happens are unclear, but
	 * defragmenting mbufs before transmitting them fixes the
	 * issue.
	 */
	if (OF_is_compatible(faa->fa_node, "starfive,jh7100-gmac"))
		sc->sc_defrag = 1;

	/* Power up PHY. */
	phy_supply = OF_getpropint(faa->fa_node, "phy-supply", 0);
	if (phy_supply)
		regulator_enable(phy_supply);

	/* Reset PHY */
	dwge_reset_phy(sc);

	node = OF_getnodebyname(faa->fa_node, "fixed-link");
	if (node) {
		ifp->if_baudrate = IF_Mbps(OF_getpropint(node, "speed", 0));

		switch (OF_getpropint(node, "speed", 0)) {
		case 1000:
			sc->sc_fixed_media = IFM_ETHER | IFM_1000_T;
			break;
		case 100:
			sc->sc_fixed_media = IFM_ETHER | IFM_100_TX;
			break;
		default:
			sc->sc_fixed_media = IFM_ETHER | IFM_AUTO;
			break;
		}

		if (OF_getpropbool(node, "full-duplex")) {
			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			sc->sc_fixed_media |= IFM_FDX;
		} else {
			ifp->if_link_state = LINK_STATE_UP;
		}
	}

	sc->sc_clk = clock_get_frequency(faa->fa_node, "stmmaceth");
	if (sc->sc_clk > 250000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_124;
	else if (sc->sc_clk > 150000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_102;
	else if (sc->sc_clk > 100000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_62;
	else if (sc->sc_clk > 60000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_42;
	else if (sc->sc_clk > 35000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_26;
	else
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_16;

	if (OF_getprop(faa->fa_node, "local-mac-address",
	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
		dwge_lladdr_read(sc, sc->sc_lladdr);
	printf(", address %s\n", ether_sprintf(sc->sc_lladdr));

	timeout_set(&sc->sc_tick, dwge_tick, sc);
	timeout_set(&sc->sc_rxto, dwge_rxtick, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = dwge_ioctl;
	ifp->if_qstart = dwge_start;
	ifp->if_watchdog = dwge_watchdog;
	ifq_init_maxlen(&ifp->if_snd, DWGE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwge_mii_readreg;
	sc->sc_mii.mii_writereg = dwge_mii_writereg;
	sc->sc_mii.mii_statchg = dwge_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwge_media_change, dwge_media_status);

	/* Do hardware specific initializations. */
	if (OF_is_compatible(faa->fa_node, "allwinner,sun7i-a20-gmac"))
		dwge_setup_allwinner(sc);
	if (OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3308-mac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac"))
		dwge_setup_rockchip(sc);

	if (OF_getpropbool(faa->fa_node, "snps,force_thresh_dma_mode"))
		sc->sc_force_thresh_dma_mode = 1;

	dwge_reset(sc);

	/* Configure MAC. */
	dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_JD | GMAC_MAC_CONF_BE | GMAC_MAC_CONF_DCRS);

	/* Configure DMA engine. */
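	/*
	 * "snps,pbl" is the DMA programmed burst length in bus beats; with
	 * GMAC_BUS_MODE_8XPBL set the hardware multiplies the programmed
	 * value by eight.  USP enables a separate Rx burst length (RPBL),
	 * which is programmed here with the same value as the Tx one.
	 */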
	mode = dwge_read(sc, GMAC_BUS_MODE);
	mode |= GMAC_BUS_MODE_USP;
	if (!OF_getpropbool(faa->fa_node, "snps,no-pbl-x8"))
		mode |= GMAC_BUS_MODE_8XPBL;
	mode &= ~(GMAC_BUS_MODE_RPBL_MASK | GMAC_BUS_MODE_PBL_MASK);
	pbl = OF_getpropint(faa->fa_node, "snps,pbl", 8);
	mode |= pbl << GMAC_BUS_MODE_RPBL_SHIFT;
	mode |= pbl << GMAC_BUS_MODE_PBL_SHIFT;
	if (OF_getpropbool(faa->fa_node, "snps,fixed-burst"))
		mode |= GMAC_BUS_MODE_FB;
	dwge_write(sc, GMAC_BUS_MODE, mode);

	/* Configure AXI master. */
	axi_config = OF_getpropint(faa->fa_node, "snps,axi-config", 0);
	node = OF_getnodebyphandle(axi_config);
	if (node) {
		uint32_t blen[7] = { 0 };
		uint32_t osr_lmt;
		int i;

		mode = dwge_read(sc, GMAC_AXI_BUS_MODE);

		osr_lmt = OF_getpropint(node, "snps,wr_osr_lmt", 1);
		mode &= ~GMAC_AXI_BUS_MODE_WR_OSR_LMT_MASK;
		mode |= (osr_lmt << GMAC_AXI_BUS_MODE_WR_OSR_LMT_SHIFT);
		osr_lmt = OF_getpropint(node, "snps,rd_osr_lmt", 1);
		mode &= ~GMAC_AXI_BUS_MODE_RD_OSR_LMT_MASK;
		mode |= (osr_lmt << GMAC_AXI_BUS_MODE_RD_OSR_LMT_SHIFT);

		OF_getpropintarray(node, "snps,blen", blen, sizeof(blen));
		for (i = 0; i < nitems(blen); i++) {
			switch (blen[i]) {
			case 256:
				mode |= GMAC_AXI_BUS_MODE_BLEN_256;
				break;
			case 128:
				mode |= GMAC_AXI_BUS_MODE_BLEN_128;
				break;
			case 64:
				mode |= GMAC_AXI_BUS_MODE_BLEN_64;
				break;
			case 32:
				mode |= GMAC_AXI_BUS_MODE_BLEN_32;
				break;
			case 16:
				mode |= GMAC_AXI_BUS_MODE_BLEN_16;
				break;
			case 8:
				mode |= GMAC_AXI_BUS_MODE_BLEN_8;
				break;
			case 4:
				mode |= GMAC_AXI_BUS_MODE_BLEN_4;
				break;
			}
		}

		dwge_write(sc, GMAC_AXI_BUS_MODE, mode);
	}

	if (sc->sc_fixed_media == 0) {
		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
		    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
			ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0,
			    NULL);
			ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	} else {
		ifmedia_add(&sc->sc_media, sc->sc_fixed_media, 0, NULL);
		ifmedia_set(&sc->sc_media, sc->sc_fixed_media);

		/* force a configuration of the clocks/mac */
		sc->sc_mii.mii_statchg(self);
	}

	if_attach(ifp);
	ether_ifattach(ifp);
#if NKSTAT > 0
	dwge_kstat_attach(sc);
#endif

	/* Disable interrupts. */
	dwge_write(sc, GMAC_INT_ENA, 0);
	dwge_write(sc, GMAC_INT_MASK,
	    GMAC_INT_MASK_LPIIM | GMAC_INT_MASK_PIM | GMAC_INT_MASK_RIM);
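	/*
	 * Mask the Rx IPC (checksum offload) counter interrupts; the
	 * remaining MMC counter interrupts are handled through the
	 * GMAC_STATUS_MMC path in dwge_intr().
	 */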
	dwge_write(sc, GMAC_MMC_IPC_INT_MSK, 0xffffffff);

	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
	    dwge_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		printf("%s: can't establish interrupt\n", sc->sc_dev.dv_xname);

	sc->sc_ifd.if_node = faa->fa_node;
	sc->sc_ifd.if_ifp = ifp;
	if_register(&sc->sc_ifd);
}

void
dwge_reset_phy(struct dwge_softc *sc)
{
	uint32_t *gpio;
	uint32_t delays[3];
	int active = 1;
	int len;

	len = OF_getproplen(sc->sc_node, "snps,reset-gpio");
	if (len <= 0)
		return;

	gpio = malloc(len, M_TEMP, M_WAITOK);

	/* Gather information. */
	OF_getpropintarray(sc->sc_node, "snps,reset-gpio", gpio, len);
	if (OF_getpropbool(sc->sc_node, "snps-reset-active-low"))
		active = 0;
	delays[0] = delays[1] = delays[2] = 0;
	OF_getpropintarray(sc->sc_node, "snps,reset-delays-us", delays,
	    sizeof(delays));

	/* Perform reset sequence. */
	gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
	gpio_controller_set_pin(gpio, !active);
	delay(delays[0]);
	gpio_controller_set_pin(gpio, active);
	delay(delays[1]);
	gpio_controller_set_pin(gpio, !active);
	delay(delays[2]);

	free(gpio, M_TEMP, len);
}

uint32_t
dwge_read(struct dwge_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
}

void
dwge_write(struct dwge_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
}

void
dwge_lladdr_read(struct dwge_softc *sc, uint8_t *lladdr)
{
	uint32_t machi, maclo;

	machi = dwge_read(sc, GMAC_MAC_ADDR0_HI);
	maclo = dwge_read(sc, GMAC_MAC_ADDR0_LO);

	lladdr[0] = (maclo >> 0) & 0xff;
	lladdr[1] = (maclo >> 8) & 0xff;
	lladdr[2] = (maclo >> 16) & 0xff;
	lladdr[3] = (maclo >> 24) & 0xff;
	lladdr[4] = (machi >> 0) & 0xff;
	lladdr[5] = (machi >> 8) & 0xff;
}

void
dwge_lladdr_write(struct dwge_softc *sc)
{
	dwge_write(sc, GMAC_MAC_ADDR0_HI,
	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
	dwge_write(sc, GMAC_MAC_ADDR0_LO,
	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
}

void
dwge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct dwge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx, left, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	idx = sc->sc_tx_prod;
	left = sc->sc_tx_cons;
	if (left <= idx)
		left += DWGE_NTXDESC;
	left -= idx;
	used = 0;

	for (;;) {
		if (used + DWGE_NTXSEGS + 1 > left) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		error = dwge_encap(sc, m, &idx, &used);
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

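		/*
		 * Writing any value to the transmit poll demand register
		 * makes the DMA engine re-read the descriptor list and
		 * pick up the newly queued packets.
		 */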
		dwge_write(sc, GMAC_TX_POLL_DEMAND, 0xffffffff);
	}
}

int
dwge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct dwge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				dwge_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dwge_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->sc_fixed_media != 0)
			error = ENOTTY;
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dwge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
dwge_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}

int
dwge_media_change(struct ifnet *ifp)
{
	struct dwge_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
dwge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dwge_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

int
dwge_mii_readreg(struct device *self, int phy, int reg)
{
	struct dwge_softc *sc = (void *)self;
	int n;

	dwge_write(sc, GMAC_GMII_ADDR,
	    sc->sc_clk << GMAC_GMII_ADDR_CR_SHIFT |
	    phy << GMAC_GMII_ADDR_PA_SHIFT |
	    reg << GMAC_GMII_ADDR_GR_SHIFT |
	    GMAC_GMII_ADDR_GB);
	for (n = 0; n < 1000; n++) {
		if ((dwge_read(sc, GMAC_GMII_ADDR) & GMAC_GMII_ADDR_GB) == 0)
			return dwge_read(sc, GMAC_GMII_DATA);
		delay(10);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
dwge_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct dwge_softc *sc = (void *)self;
	int n;

	dwge_write(sc, GMAC_GMII_DATA, val);
	dwge_write(sc, GMAC_GMII_ADDR,
	    sc->sc_clk << GMAC_GMII_ADDR_CR_SHIFT |
	    phy << GMAC_GMII_ADDR_PA_SHIFT |
	    reg << GMAC_GMII_ADDR_GR_SHIFT |
	    GMAC_GMII_ADDR_GW | GMAC_GMII_ADDR_GB);
	for (n = 0; n < 1000; n++) {
		if ((dwge_read(sc, GMAC_GMII_ADDR) & GMAC_GMII_ADDR_GB) == 0)
			return;
		delay(10);
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

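/*
 * Program the MAC for the current media: PS selects the 10/100 (MII)
 * port instead of the gigabit (GMII) port, FES selects 100 Mb/s over
 * 10 Mb/s and DM enables full duplex.
 */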
void
dwge_mii_statchg(struct device *self)
{
	struct dwge_softc *sc = (void *)self;
	uint32_t conf;
	uint64_t media_active;

	conf = dwge_read(sc, GMAC_MAC_CONF);
	conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES);

	media_active = sc->sc_fixed_media;
	if (media_active == 0)
		media_active = sc->sc_mii.mii_media_active;

	switch (IFM_SUBTYPE(media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		sc->sc_link = 1;
		break;
	case IFM_100_TX:
		conf |= GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES;
		sc->sc_link = 1;
		break;
	case IFM_10_T:
		conf |= GMAC_MAC_CONF_PS;
		sc->sc_link = 1;
		break;
	default:
		sc->sc_link = 0;
		return;
	}

	if (sc->sc_link == 0)
		return;

	conf &= ~GMAC_MAC_CONF_DM;
	if ((media_active & IFM_GMASK) == IFM_FDX)
		conf |= GMAC_MAC_CONF_DM;

	/* XXX: RX/TX flow control? */

	dwge_write(sc, GMAC_MAC_CONF, conf);
}

void
dwge_tick(void *arg)
{
	struct dwge_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

void
dwge_rxtick(void *arg)
{
	struct dwge_softc *sc = arg;
	uint32_t mode;
	int s;

	s = splnet();

	mode = dwge_read(sc, GMAC_OP_MODE);
	dwge_write(sc, GMAC_OP_MODE, mode & ~GMAC_OP_MODE_SR);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
	    0, DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, 0);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwge_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
	    0, DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_rxring));
	dwge_write(sc, GMAC_OP_MODE, mode);

	splx(s);
}

int
dwge_intr(void *arg)
{
	struct dwge_softc *sc = arg;
	uint32_t reg;

	reg = dwge_read(sc, GMAC_STATUS);
	dwge_write(sc, GMAC_STATUS, reg);

	if (reg & GMAC_STATUS_RI)
		dwge_rx_proc(sc);

	if (reg & GMAC_STATUS_TI ||
	    reg & GMAC_STATUS_TU)
		dwge_tx_proc(sc);

#if NKSTAT > 0
	if (reg & GMAC_STATUS_MMC) {
		mtx_enter(&sc->sc_kstat_mtx);
		dwge_kstat_read(sc->sc_kstat);
		mtx_leave(&sc->sc_kstat_mtx);
	}
#endif

	return (1);
}

void
dwge_tx_proc(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_desc *txd;
	struct dwge_buf *txb;
	int idx, txfree;

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring), 0,
	    DWGE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cons != sc->sc_tx_prod) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWGE_NTXDESC);

		txd = &sc->sc_txdesc[idx];
		if (txd->sd_status & TDES0_OWN)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;

		if (sc->sc_tx_cons == (DWGE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_status = sc->sc_enh_desc ? ETDES0_TCH : 0;
	}

	if (sc->sc_tx_cons == sc->sc_tx_prod)
		ifp->if_timer = 0;

	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}

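/*
 * Harvest completed Rx descriptors: frames with an error summary are
 * dropped, good frames have their CRC stripped and are handed to the
 * network stack, and the ring is refilled afterwards.
 */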
void
dwge_rx_proc(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_desc *rxd;
	struct dwge_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len, cnt, put;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring), 0,
	    DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cnt = if_rxr_inuse(&sc->sc_rx_ring);
	put = 0;
	while (put < cnt) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWGE_NRXDESC);

		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_status & RDES0_OWN)
			break;

		len = (rxd->sd_status >> RDES0_FL_SHIFT) & RDES0_FL_MASK;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		m = rxb->tb_m;
		rxb->tb_m = NULL;
		if (rxd->sd_status & RDES0_ES) {
			ifp->if_ierrors++;
			m_freem(m);
		} else {
			/* Strip off CRC. */
			len -= ETHER_CRC_LEN;
			KASSERT(len > 0);

			m->m_pkthdr.len = m->m_len = len;

			ml_enqueue(&ml, m);
		}

		put++;
		if (sc->sc_rx_cons == (DWGE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	if_rxr_put(&sc->sc_rx_ring, put);
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	dwge_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring), 0,
	    DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

}

void
dwge_up(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_buf *txb, *rxb;
	uint32_t mode;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwge_dmamem_alloc(sc,
	    DWGE_NTXDESC * sizeof(struct dwge_desc), 8);
	sc->sc_txdesc = DWGE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwge_buf) * DWGE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWGE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWGE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;

		sc->sc_txdesc[i].sd_next =
		    DWGE_DMA_DVA(sc->sc_txring) +
		    ((i+1) % DWGE_NTXDESC) * sizeof(struct dwge_desc);
		if (sc->sc_enh_desc)
			sc->sc_txdesc[i].sd_status = ETDES0_TCH;
		else
			sc->sc_txdesc[i].sd_len = TDES1_TCH;
	}

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
	    0, DWGE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	dwge_write(sc, GMAC_TX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_txring));

	/* Allocate descriptor ring. */
	sc->sc_rxring = dwge_dmamem_alloc(sc,
	    DWGE_NRXDESC * sizeof(struct dwge_desc), 8);
	sc->sc_rxdesc = DWGE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwge_buf) * DWGE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWGE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;

		sc->sc_rxdesc[i].sd_next =
		    DWGE_DMA_DVA(sc->sc_rxring) +
		    ((i+1) % DWGE_NRXDESC) * sizeof(struct dwge_desc);
		sc->sc_rxdesc[i].sd_len =
		    sc->sc_enh_desc ? ERDES1_RCH : RDES1_RCH;
	}

	if_rxr_init(&sc->sc_rx_ring, 2, DWGE_NRXDESC);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwge_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
	    0, DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_rxring));

	dwge_lladdr_write(sc);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwge_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	dwge_write(sc, GMAC_INT_ENA, GMAC_INT_ENA_NIE |
	    GMAC_INT_ENA_RIE | GMAC_INT_ENA_TIE | GMAC_INT_ENA_TUE);

	mode = dwge_read(sc, GMAC_OP_MODE);
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~(GMAC_OP_MODE_TSF | GMAC_OP_MODE_TTC_MASK);
		mode |= GMAC_OP_MODE_TTC_128;
		mode &= ~(GMAC_OP_MODE_RSF | GMAC_OP_MODE_RTC_MASK);
		mode |= GMAC_OP_MODE_RTC_128;
	} else {
		mode |= GMAC_OP_MODE_TSF | GMAC_OP_MODE_OSF;
		mode |= GMAC_OP_MODE_RSF;
	}
	dwge_write(sc, GMAC_OP_MODE, mode | GMAC_OP_MODE_ST | GMAC_OP_MODE_SR);

	dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);

	if (sc->sc_fixed_media == 0)
		timeout_add_sec(&sc->sc_tick, 1);
}

void
dwge_down(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_buf *txb, *rxb;
	uint32_t dmactrl;
	int i;

	timeout_del(&sc->sc_rxto);
	if (sc->sc_fixed_media == 0)
		timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	dwge_stop_dma(sc);

	dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc,
	    GMAC_MAC_CONF) & ~(GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE));

	dmactrl = dwge_read(sc, GMAC_OP_MODE);
	dmactrl &= ~(GMAC_OP_MODE_ST | GMAC_OP_MODE_SR);
	dwge_write(sc, GMAC_OP_MODE, dmactrl);

	dwge_write(sc, GMAC_INT_ENA, 0);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	for (i = 0; i < DWGE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwge_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < DWGE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
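/*
 * Program the frame filter.  With IFF_PROMISC (or when multicast
 * ranges are in use) the filter passes everything; otherwise a 64-bit
 * multicast hash filter is built from the CRC of each address, six
 * bits of which select a bit in the two hash registers.
 */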
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwge_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

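/*
 * Map an mbuf chain (defragmenting it if the controller needs that
 * workaround or if it has too many segments) and fill in a chain of
 * Tx descriptors for it.  The OWN bit of the first descriptor is set
 * last so the DMA engine never sees a partially built chain.
 */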
void
dwge_iff(struct dwge_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg;

	reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= GMAC_MAC_FRM_FILT_PM;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= GMAC_MAC_FRM_FILT_PR;
	} else {
		reg |= GMAC_MAC_FRM_FILT_HMC;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwge_lladdr_write(sc);

	dwge_write(sc, GMAC_HASH_TAB_HI, hash[1]);
	dwge_write(sc, GMAC_HASH_TAB_LO, hash[0]);

	dwge_write(sc, GMAC_MAC_FRM_FILT, reg);
}

int
dwge_encap(struct dwge_softc *sc, struct mbuf *m, int *idx, int *used)
{
	struct dwge_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (sc->sc_defrag) {
		if (m_defrag(m, M_DONTWAIT))
			return (ENOBUFS);
	}

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		txd->sd_addr = map->dm_segs[i].ds_addr;
		if (sc->sc_enh_desc) {
			txd->sd_status = ETDES0_TCH;
			txd->sd_len = map->dm_segs[i].ds_len;
			if (i == 0)
				txd->sd_status |= ETDES0_FS;
			if (i == (map->dm_nsegs - 1))
				txd->sd_status |= ETDES0_LS | ETDES0_IC;
		} else {
			txd->sd_status = 0;
			txd->sd_len = map->dm_segs[i].ds_len | TDES1_TCH;
			if (i == 0)
				txd->sd_len |= TDES1_FS;
			if (i == (map->dm_nsegs - 1))
				txd->sd_len |= TDES1_LS | TDES1_IC;
		}
		if (i != 0)
			txd->sd_status |= TDES0_OWN;

		bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (DWGE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

	txd_start->sd_status |= TDES0_OWN;
	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	*idx = frag;
	*used += map->dm_nsegs;

	return (0);
}

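/*
 * Perform a software reset of the core.  The SWR bit self-clears when
 * the reset has completed; this only happens if the interface clocks
 * are running, otherwise the loop below times out.
 */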
void
dwge_reset(struct dwge_softc *sc)
{
	int n;

	dwge_stop_dma(sc);

	dwge_write(sc, GMAC_BUS_MODE, dwge_read(sc, GMAC_BUS_MODE) |
	    GMAC_BUS_MODE_SWR);

	for (n = 0; n < 30000; n++) {
		if ((dwge_read(sc, GMAC_BUS_MODE) &
		    GMAC_BUS_MODE_SWR) == 0)
			return;
		delay(10);
	}

	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
}

void
dwge_stop_dma(struct dwge_softc *sc)
{
	uint32_t dmactrl;

	/* Stop DMA. */
	dmactrl = dwge_read(sc, GMAC_OP_MODE);
	dmactrl &= ~GMAC_OP_MODE_ST;
	dmactrl |= GMAC_OP_MODE_FTF;
	dwge_write(sc, GMAC_OP_MODE, dmactrl);
}

struct dwge_dmamem *
dwge_dmamem_alloc(struct dwge_softc *sc, bus_size_t size, bus_size_t align)
{
	struct dwge_dmamem *tdm;
	int nsegs;

	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(tdm->tdm_kva, size);

	return (tdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF, 0);

	return (NULL);
}

void
dwge_dmamem_free(struct dwge_softc *sc, struct dwge_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}

struct mbuf *
dwge_alloc_mbuf(struct dwge_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}

void
dwge_fill_rx_ring(struct dwge_softc *sc)
{
	struct dwge_desc *rxd;
	struct dwge_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, DWGE_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = dwge_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		rxd->sd_len = rxb->tb_map->dm_segs[0].ds_len;
		rxd->sd_len |= sc->sc_enh_desc ? ERDES1_RCH : RDES1_RCH;
		rxd->sd_addr = rxb->tb_map->dm_segs[0].ds_addr;
		rxd->sd_status = RDES0_OWN;

		if (sc->sc_rx_prod == (DWGE_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}

/*
 * Allwinner A20/A31.
 */

void
dwge_setup_allwinner(struct dwge_softc *sc)
{
	char phy_mode[8];
	uint32_t freq;

	/* default to RGMII */
	OF_getprop(sc->sc_node, "phy-mode", phy_mode, sizeof(phy_mode));
	if (strcmp(phy_mode, "mii") == 0)
		freq = 25000000;
	else
		freq = 125000000;
	clock_set_frequency(sc->sc_node, "allwinner_gmac_tx", freq);
}

/*
 * Rockchip RK3288/RK3399.
 */

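/*
 * On the Rockchip SoCs the GMAC is glued to the SoC through the GRF
 * (general register files) syscon, which selects the PHY interface,
 * the RGMII clock delays and the per-speed MAC clock divider.  The
 * upper 16 bits of each GRF write are a write-enable mask for the
 * lower 16 bits, hence the ((x << n) << 16 | ...) encoding below.
 */
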
/* RK3308 registers */
#define RK3308_GRF_MAC_CON0	0x04a0
#define RK3308_MAC_SPEED_100M	((0x1 << 0) << 16 | (0x1 << 0))
#define RK3308_MAC_SPEED_10M	((0x1 << 0) << 16 | (0x0 << 0))
#define RK3308_INTF_SEL_RMII	((0x1 << 4) << 16 | (0x1 << 4))

/* RK3288 registers */
#define RK3288_GRF_SOC_CON1	0x0248
#define RK3288_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 6) << 16 | (0x1 << 6))
#define RK3288_GMAC_PHY_INTF_SEL_RMII	((0x7 << 6) << 16 | (0x4 << 6))
#define RK3288_RMII_MODE_RMII	((1 << 14) << 16 | (1 << 14))
#define RK3288_RMII_MODE_MII	((1 << 14) << 16 | (0 << 14))
#define RK3288_GMAC_CLK_SEL_125	((0x3 << 12) << 16 | (0x0 << 12))
#define RK3288_GMAC_CLK_SEL_25	((0x3 << 12) << 16 | (0x3 << 12))
#define RK3288_GMAC_CLK_SEL_2_5	((0x3 << 12) << 16 | (0x2 << 12))

#define RK3288_GRF_SOC_CON3	0x0250
#define RK3288_GMAC_RXCLK_DLY_ENA	((1 << 15) << 16 | (1 << 15))
#define RK3288_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 7) << 16 | ((val) << 7))
#define RK3288_GMAC_TXCLK_DLY_ENA	((1 << 14) << 16 | (1 << 14))
#define RK3288_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))

/* RK3328 registers */
#define RK3328_GRF_MAC_CON0	0x0900
#define RK3328_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 7) << 16 | ((val) << 7))
#define RK3328_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))

#define RK3328_GRF_MAC_CON1	0x0904
#define RK3328_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 4) << 16 | (0x1 << 4))
#define RK3328_GMAC_PHY_INTF_SEL_RMII	((0x7 << 4) << 16 | (0x4 << 4))
#define RK3328_RMII_MODE_RMII	((1 << 9) << 16 | (1 << 9))
#define RK3328_RMII_MODE_MII	((1 << 9) << 16 | (0 << 9))
#define RK3328_GMAC_CLK_SEL_125	((0x3 << 11) << 16 | (0x0 << 11))
#define RK3328_GMAC_CLK_SEL_25	((0x3 << 11) << 16 | (0x3 << 11))
#define RK3328_GMAC_CLK_SEL_2_5	((0x3 << 11) << 16 | (0x2 << 11))
#define RK3328_GMAC_RXCLK_DLY_ENA	((1 << 1) << 16 | (1 << 1))
#define RK3328_GMAC_TXCLK_DLY_ENA	((1 << 0) << 16 | (1 << 0))

/* RK3399 registers */
#define RK3399_GRF_SOC_CON5	0xc214
#define RK3399_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 9) << 16 | (0x1 << 9))
#define RK3399_GMAC_PHY_INTF_SEL_RMII	((0x7 << 9) << 16 | (0x4 << 9))
#define RK3399_RMII_MODE_RMII	((1 << 6) << 16 | (1 << 6))
#define RK3399_RMII_MODE_MII	((1 << 6) << 16 | (0 << 6))
#define RK3399_GMAC_CLK_SEL_125	((0x3 << 4) << 16 | (0x0 << 4))
#define RK3399_GMAC_CLK_SEL_25	((0x3 << 4) << 16 | (0x3 << 4))
#define RK3399_GMAC_CLK_SEL_2_5	((0x3 << 4) << 16 | (0x2 << 4))
#define RK3399_GRF_SOC_CON6	0xc218
#define RK3399_GMAC_RXCLK_DLY_ENA	((1 << 15) << 16 | (1 << 15))
#define RK3399_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 8) << 16 | ((val) << 8))
#define RK3399_GMAC_TXCLK_DLY_ENA	((1 << 7) << 16 | (1 << 7))
#define RK3399_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))

void	dwge_mii_statchg_rockchip(struct device *);

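/*
 * Look up the GRF regmap and program the PHY interface selection and
 * the RGMII delay lines from the "tx_delay"/"rx_delay" properties.
 * The GRF register and bits that select the MAC clock are remembered
 * so dwge_mii_statchg_rockchip() can switch them on link changes.
 */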
void
dwge_setup_rockchip(struct dwge_softc *sc)
{
	struct regmap *rm;
	uint32_t grf;
	int tx_delay, rx_delay;
	char clock_mode[8];

	grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
	rm = regmap_byphandle(grf);
	if (rm == NULL)
		return;

	tx_delay = OF_getpropint(sc->sc_node, "tx_delay", 0x30);
	rx_delay = OF_getpropint(sc->sc_node, "rx_delay", 0x10);

	if (OF_is_compatible(sc->sc_node, "rockchip,rk3288-gmac")) {
		/* Use RGMII interface. */
		regmap_write_4(rm, RK3288_GRF_SOC_CON1,
		    RK3288_GMAC_PHY_INTF_SEL_RGMII | RK3288_RMII_MODE_MII);

		/* Program clock delay lines. */
		regmap_write_4(rm, RK3288_GRF_SOC_CON3,
		    RK3288_GMAC_TXCLK_DLY_ENA | RK3288_GMAC_RXCLK_DLY_ENA |
		    RK3288_GMAC_CLK_TX_DL_CFG(tx_delay) |
		    RK3288_GMAC_CLK_RX_DL_CFG(rx_delay));

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3288_GRF_SOC_CON1;
		sc->sc_clk_sel_2_5 = RK3288_GMAC_CLK_SEL_2_5;
		sc->sc_clk_sel_25 = RK3288_GMAC_CLK_SEL_25;
		sc->sc_clk_sel_125 = RK3288_GMAC_CLK_SEL_125;
	} else if (OF_is_compatible(sc->sc_node, "rockchip,rk3308-mac")) {
		/* Use RMII interface. */
		regmap_write_4(rm, RK3308_GRF_MAC_CON0,
		    RK3308_INTF_SEL_RMII | RK3308_MAC_SPEED_100M);

		/* Adjust MAC clock if necessary. */
		OF_getprop(sc->sc_node, "clock_in_out", clock_mode,
		    sizeof(clock_mode));
		if (strcmp(clock_mode, "output") == 0) {
			clock_set_frequency(sc->sc_node, "stmmaceth",
			    50000000);
			sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_26;
		}

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3308_GRF_MAC_CON0;
		sc->sc_clk_sel_2_5 = RK3308_MAC_SPEED_10M;
		sc->sc_clk_sel_25 = RK3308_MAC_SPEED_100M;
	} else if (OF_is_compatible(sc->sc_node, "rockchip,rk3328-gmac")) {
		/* Use RGMII interface. */
		regmap_write_4(rm, RK3328_GRF_MAC_CON1,
		    RK3328_GMAC_PHY_INTF_SEL_RGMII | RK3328_RMII_MODE_MII);

		/* Program clock delay lines. */
		regmap_write_4(rm, RK3328_GRF_MAC_CON0,
		    RK3328_GMAC_CLK_TX_DL_CFG(tx_delay) |
		    RK3328_GMAC_CLK_RX_DL_CFG(rx_delay));
		regmap_write_4(rm, RK3328_GRF_MAC_CON1,
		    RK3328_GMAC_TXCLK_DLY_ENA | RK3328_GMAC_RXCLK_DLY_ENA);

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3328_GRF_MAC_CON1;
		sc->sc_clk_sel_2_5 = RK3328_GMAC_CLK_SEL_2_5;
		sc->sc_clk_sel_25 = RK3328_GMAC_CLK_SEL_25;
		sc->sc_clk_sel_125 = RK3328_GMAC_CLK_SEL_125;
	} else {
		/* Use RGMII interface. */
		regmap_write_4(rm, RK3399_GRF_SOC_CON5,
		    RK3399_GMAC_PHY_INTF_SEL_RGMII | RK3399_RMII_MODE_MII);

		/* Program clock delay lines. */
		regmap_write_4(rm, RK3399_GRF_SOC_CON6,
		    RK3399_GMAC_TXCLK_DLY_ENA | RK3399_GMAC_RXCLK_DLY_ENA |
		    RK3399_GMAC_CLK_TX_DL_CFG(tx_delay) |
		    RK3399_GMAC_CLK_RX_DL_CFG(rx_delay));

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3399_GRF_SOC_CON5;
		sc->sc_clk_sel_2_5 = RK3399_GMAC_CLK_SEL_2_5;
		sc->sc_clk_sel_25 = RK3399_GMAC_CLK_SEL_25;
		sc->sc_clk_sel_125 = RK3399_GMAC_CLK_SEL_125;
	}

	sc->sc_mii.mii_statchg = dwge_mii_statchg_rockchip;
}

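/*
 * Rockchip wrapper around dwge_mii_statchg(): after the generic MAC
 * configuration, select the 125/25/2.5 MHz MAC clock in the GRF to
 * match the negotiated link speed.
 */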
void
dwge_mii_statchg_rockchip(struct device *self)
{
	struct dwge_softc *sc = (void *)self;
	struct regmap *rm;
	uint32_t grf;
	uint32_t gmac_clk_sel = 0;
	uint64_t media_active;

	dwge_mii_statchg(self);

	grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
	rm = regmap_byphandle(grf);
	if (rm == NULL)
		return;

	media_active = sc->sc_fixed_media;
	if (media_active == 0)
		media_active = sc->sc_mii.mii_media_active;

	switch (IFM_SUBTYPE(media_active)) {
	case IFM_10_T:
		gmac_clk_sel = sc->sc_clk_sel_2_5;
		break;
	case IFM_100_TX:
		gmac_clk_sel = sc->sc_clk_sel_25;
		break;
	case IFM_1000_T:
		gmac_clk_sel = sc->sc_clk_sel_125;
		break;
	}

	regmap_write_4(rm, sc->sc_clk_sel, gmac_clk_sel);
}

#if NKSTAT > 0

struct dwge_counter {
	const char		*c_name;
	enum kstat_kv_unit	c_unit;
	uint32_t		c_reg;
};

const struct dwge_counter dwge_counters[] = {
	{ "tx octets total", KSTAT_KV_U_BYTES, GMAC_MMC_TXOCTETCNT_GB },
	{ "tx frames total", KSTAT_KV_U_PACKETS, GMAC_MMC_TXFRMCNT_GB },
	{ "tx underflow", KSTAT_KV_U_PACKETS, GMAC_MMC_TXUNDFLWERR },
	{ "tx carrier err", KSTAT_KV_U_PACKETS, GMAC_MMC_TXCARERR },
	{ "tx good octets", KSTAT_KV_U_BYTES, GMAC_MMC_TXOCTETCNT_G },
	{ "tx good frames", KSTAT_KV_U_PACKETS, GMAC_MMC_TXFRMCNT_G },
	{ "rx frames total", KSTAT_KV_U_PACKETS, GMAC_MMC_RXFRMCNT_GB },
	{ "rx octets total", KSTAT_KV_U_BYTES, GMAC_MMC_RXOCTETCNT_GB },
	{ "rx good octets", KSTAT_KV_U_BYTES, GMAC_MMC_RXOCTETCNT_G },
	{ "rx good mcast", KSTAT_KV_U_PACKETS, GMAC_MMC_RXMCFRMCNT_G },
	{ "rx crc errors", KSTAT_KV_U_PACKETS, GMAC_MMC_RXCRCERR },
	{ "rx len errors", KSTAT_KV_U_PACKETS, GMAC_MMC_RXLENERR },
	{ "rx fifo err", KSTAT_KV_U_PACKETS, GMAC_MMC_RXFIFOOVRFLW },
};

void
dwge_kstat_attach(struct dwge_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	int i;

	mtx_init(&sc->sc_kstat_mtx, IPL_NET);

	/* clear counters, enable reset-on-read */
	dwge_write(sc, GMAC_MAC_MMC_CTRL, GMAC_MAC_MMC_CTRL_ROR |
	    GMAC_MAC_MMC_CTRL_CR);

	ks = kstat_create(DEVNAME(sc), 0, "dwge-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(dwge_counters), sizeof(*kvs), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < nitems(dwge_counters); i++) {
		kstat_kv_unit_init(&kvs[i], dwge_counters[i].c_name,
		    KSTAT_KV_T_COUNTER64, dwge_counters[i].c_unit);
	}

	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(dwge_counters) * sizeof(*kvs);
	ks->ks_read = dwge_kstat_read;
	sc->sc_kstat = ks;
	kstat_install(ks);
}

int
dwge_kstat_read(struct kstat *ks)
{
	struct kstat_kv *kvs = ks->ks_data;
	struct dwge_softc *sc = ks->ks_softc;
	int i;

	for (i = 0; i < nitems(dwge_counters); i++)
		kstat_kv_u64(&kvs[i]) += dwge_read(sc, dwge_counters[i].c_reg);

	getnanouptime(&ks->ks_updated);
	return 0;
}

#endif