/*	$OpenBSD: if_mvneta.c,v 1.13 2020/07/10 13:26:36 patrick Exp $	*/
/*	$NetBSD: if_mvneta.c,v 1.41 2015/04/15 10:15:40 hsuenaga Exp $	*/
/*
 * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Marvell NETA gigabit ethernet controller found on
 * Armada 370/38x/3700 SoCs, attached via the flattened device tree.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <uvm/uvm_extern.h>
#include <sys/mbuf.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_pinctrl.h>
#include <dev/ofw/fdt.h>

#include <dev/fdt/if_mvnetareg.h>

#ifdef __armv7__
#include <armv7/marvell/mvmbusvar.h>
#endif

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>

/* NOTE(review): <net/bpf.h> is also pulled in below under NBPFILTER;
 * this unconditional include looks redundant but is kept as-is. */
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

/* Debug printout helpers; compiled out unless MVNETA_DEBUG is defined. */
#ifdef MVNETA_DEBUG
#define DPRINTF(x)	if (mvneta_debug) printf x
#define DPRINTFN(n,x)	if (mvneta_debug >= (n)) printf x
int mvneta_debug = MVNETA_DEBUG;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* 32-bit register accessors for the memory-mapped NETA register window. */
#define MVNETA_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVNETA_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
/* Bulk accessors for the (multi-word) unicast/multicast filter tables. */
#define MVNETA_READ_FILTER(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
#define MVNETA_WRITE_FILTER(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))

/* Link state is read straight from port status register 0. */
#define MVNETA_LINKUP_READ(sc) \
	MVNETA_READ(sc, MVNETA_PS0)
#define MVNETA_IS_LINKUP(sc)	(MVNETA_LINKUP_READ(sc) & MVNETA_PS0_LINKUP)

/*
 * Ring geometry.  Counts must be powers of two so that the _NEXT()
 * wrap-around can be done with a mask (checked by the CTASSERTs below).
 */
#define MVNETA_TX_RING_CNT	256
#define MVNETA_TX_RING_MSK	(MVNETA_TX_RING_CNT - 1)
#define MVNETA_TX_RING_NEXT(x)	(((x) + 1) & MVNETA_TX_RING_MSK)
#define MVNETA_TX_QUEUE_CNT	1
#define MVNETA_RX_RING_CNT	256
#define MVNETA_RX_RING_MSK	(MVNETA_RX_RING_CNT - 1)
#define MVNETA_RX_RING_NEXT(x)	(((x) + 1) & MVNETA_RX_RING_MSK)
#define MVNETA_RX_QUEUE_CNT	1

CTASSERT(MVNETA_TX_RING_CNT > 1 && MVNETA_TX_RING_NEXT(MVNETA_TX_RING_CNT) ==
	(MVNETA_TX_RING_CNT + 1) % MVNETA_TX_RING_CNT);
CTASSERT(MVNETA_RX_RING_CNT > 1 && MVNETA_RX_RING_NEXT(MVNETA_RX_RING_CNT) ==
	(MVNETA_RX_RING_CNT + 1) % MVNETA_RX_RING_CNT);

/* Maximum number of DMA segments accepted for one tx mbuf chain. */
#define MVNETA_NTXSEG		30

/* A chunk of DMA-safe memory shared with the chip (descriptor rings). */
struct mvneta_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded map for the chunk */
	bus_dma_segment_t	mdm_seg;	/* single backing segment */
	size_t			mdm_size;	/* size in bytes */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
#define MVNETA_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVNETA_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVNETA_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVNETA_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)

/* Per-descriptor software state: the DMA map and the mbuf it holds. */
struct mvneta_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};

struct mvneta_softc {
	struct device sc_dev;
	struct mii_bus *sc_mdio;	/* MDIO bus found via "phy" phandle */

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t sc_dmat;

	struct arpcom sc_ac;
#define sc_enaddr sc_ac.ac_enaddr
	struct mii_data sc_mii;
#define sc_media sc_mii.mii_media

	struct timeout sc_tick_ch;	/* 1s mii_tick timer */

	/* Transmit ring state (single queue). */
	struct mvneta_dmamem	*sc_txring;
	struct mvneta_buf	*sc_txbuf;
	struct mvneta_tx_desc	*sc_txdesc;
	int			 sc_tx_prod;	/* next free tx desc */
	int			 sc_tx_cnt;	/* amount of tx sent */
	int			 sc_tx_cons;	/* first tx desc sent */

	/* Receive ring state (single queue). */
	struct mvneta_dmamem	*sc_rxring;
	struct mvneta_buf	*sc_rxbuf;
	struct mvneta_rx_desc	*sc_rxdesc;
	int			 sc_rx_prod;	/* next rx desc to fill */
	struct if_rxring	 sc_rx_ring;
	int			 sc_rx_cons;	/* next rx desc recvd */

	/* PHY connection type, parsed from the "phy-mode" FDT property. */
	enum {
		PHY_MODE_QSGMII,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
	} sc_phy_mode;
	int			 sc_fixed_link;	  /* no MDIO PHY attached */
	int			 sc_inband_status; /* link state from PCS */
	int			 sc_phy;	  /* "phy" phandle */
	int			 sc_phyloc;	  /* PHY address on MDIO bus */
	int			 sc_link;	  /* cached link-up state */
	int			 sc_sfp;	  /* "sfp" phandle (or 0) */
};


int mvneta_miibus_readreg(struct device *, int, int);
void mvneta_miibus_writereg(struct device *, int, int, int);
void mvneta_miibus_statchg(struct device *);

void mvneta_wininit(struct mvneta_softc *);

/* Gigabit Ethernet Port part functions */
int mvneta_match(struct device *, void *, void *);
void mvneta_attach(struct device *, struct device *, void *);
void mvneta_attach_deferred(struct device *);

void mvneta_tick(void *);
int mvneta_intr(void *);

void mvneta_start(struct ifnet *);
int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
void mvneta_inband_statchg(struct mvneta_softc *);
void mvneta_port_change(struct mvneta_softc *);
void mvneta_port_up(struct mvneta_softc *);
int mvneta_up(struct mvneta_softc *);
void mvneta_down(struct mvneta_softc *);
void mvneta_watchdog(struct ifnet *);

int mvneta_mediachange(struct ifnet *);
void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);

int mvneta_encap(struct mvneta_softc *, struct mbuf *, uint32_t *);
void mvneta_rx_proc(struct mvneta_softc *);
void mvneta_tx_proc(struct mvneta_softc *);
uint8_t mvneta_crc8(const uint8_t *, size_t);
void mvneta_iff(struct mvneta_softc *);

struct mvneta_dmamem *mvneta_dmamem_alloc(struct mvneta_softc *,
    bus_size_t, bus_size_t);
void mvneta_dmamem_free(struct mvneta_softc *, struct mvneta_dmamem *);
void mvneta_fill_rx_ring(struct mvneta_softc *);

/* Serializes SIOCGIFSFFPAGE i2c access to the SFP cage. */
static struct rwlock mvneta_sff_lock = RWLOCK_INITIALIZER("mvnetasff");

struct cfdriver mvneta_cd = {
	NULL, "mvneta", DV_IFNET
};

struct cfattach mvneta_ca = {
	sizeof (struct mvneta_softc), mvneta_match, mvneta_attach,
};

/*
 * Read a PHY register by forwarding to the MDIO controller this port's
 * PHY hangs off (looked up in mvneta_attach_deferred()).
 */
int
mvneta_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
}

226 void 227 mvneta_miibus_writereg(struct device *dev, int phy, int reg, int val) 228 { 229 struct mvneta_softc *sc = (struct mvneta_softc *) dev; 230 return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val); 231 } 232 233 void 234 mvneta_miibus_statchg(struct device *self) 235 { 236 struct mvneta_softc *sc = (struct mvneta_softc *)self; 237 238 if (sc->sc_mii.mii_media_status & IFM_ACTIVE) { 239 uint32_t panc = MVNETA_READ(sc, MVNETA_PANC); 240 241 panc &= ~(MVNETA_PANC_SETMIISPEED | 242 MVNETA_PANC_SETGMIISPEED | 243 MVNETA_PANC_SETFULLDX); 244 245 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { 246 case IFM_1000_SX: 247 case IFM_1000_LX: 248 case IFM_1000_CX: 249 case IFM_1000_T: 250 panc |= MVNETA_PANC_SETGMIISPEED; 251 break; 252 case IFM_100_TX: 253 panc |= MVNETA_PANC_SETMIISPEED; 254 break; 255 case IFM_10_T: 256 break; 257 } 258 259 if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX) 260 panc |= MVNETA_PANC_SETFULLDX; 261 262 MVNETA_WRITE(sc, MVNETA_PANC, panc); 263 } 264 265 mvneta_port_change(sc); 266 } 267 268 void 269 mvneta_inband_statchg(struct mvneta_softc *sc) 270 { 271 uint32_t reg; 272 273 sc->sc_mii.mii_media_status = IFM_AVALID; 274 sc->sc_mii.mii_media_active = IFM_ETHER; 275 276 reg = MVNETA_READ(sc, MVNETA_PS0); 277 if (reg & MVNETA_PS0_LINKUP) 278 sc->sc_mii.mii_media_status |= IFM_ACTIVE; 279 if (reg & MVNETA_PS0_GMIISPEED) 280 sc->sc_mii.mii_media_active |= IFM_1000_T; 281 else if (reg & MVNETA_PS0_MIISPEED) 282 sc->sc_mii.mii_media_active |= IFM_100_TX; 283 else 284 sc->sc_mii.mii_media_active |= IFM_10_T; 285 if (reg & MVNETA_PS0_FULLDX) 286 sc->sc_mii.mii_media_active |= IFM_FDX; 287 288 mvneta_port_change(sc); 289 } 290 291 void 292 mvneta_enaddr_write(struct mvneta_softc *sc) 293 { 294 uint32_t maddrh, maddrl; 295 maddrh = sc->sc_enaddr[0] << 24; 296 maddrh |= sc->sc_enaddr[1] << 16; 297 maddrh |= sc->sc_enaddr[2] << 8; 298 maddrh |= sc->sc_enaddr[3]; 299 maddrl = sc->sc_enaddr[4] << 8; 300 maddrl |= 
sc->sc_enaddr[5]; 301 MVNETA_WRITE(sc, MVNETA_MACAH, maddrh); 302 MVNETA_WRITE(sc, MVNETA_MACAL, maddrl); 303 } 304 305 void 306 mvneta_wininit(struct mvneta_softc *sc) 307 { 308 uint32_t en; 309 int i; 310 311 #ifdef __armv7__ 312 if (mvmbus_dram_info == NULL) 313 panic("%s: mbus dram information not set up", 314 sc->sc_dev.dv_xname); 315 #endif 316 317 for (i = 0; i < MVNETA_NWINDOW; i++) { 318 MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 0); 319 MVNETA_WRITE(sc, MVNETA_S(i), 0); 320 321 if (i < MVNETA_NREMAP) 322 MVNETA_WRITE(sc, MVNETA_HA(i), 0); 323 } 324 325 en = MVNETA_BARE_EN_MASK; 326 327 #ifdef __armv7__ 328 for (i = 0; i < mvmbus_dram_info->numcs; i++) { 329 struct mbus_dram_window *win = &mvmbus_dram_info->cs[i]; 330 331 MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 332 MVNETA_BASEADDR_TARGET(mvmbus_dram_info->targetid) | 333 MVNETA_BASEADDR_ATTR(win->attr) | 334 MVNETA_BASEADDR_BASE(win->base)); 335 MVNETA_WRITE(sc, MVNETA_S(i), MVNETA_S_SIZE(win->size)); 336 337 en &= ~(1 << i); 338 } 339 #else 340 MVNETA_WRITE(sc, MVNETA_S(0), MVNETA_S_SIZE(0)); 341 en &= ~(1 << 0); 342 #endif 343 344 MVNETA_WRITE(sc, MVNETA_BARE, en); 345 } 346 347 int 348 mvneta_match(struct device *parent, void *cfdata, void *aux) 349 { 350 struct fdt_attach_args *faa = aux; 351 352 return OF_is_compatible(faa->fa_node, "marvell,armada-370-neta") || 353 OF_is_compatible(faa->fa_node, "marvell,armada-3700-neta"); 354 } 355 356 void 357 mvneta_attach(struct device *parent, struct device *self, void *aux) 358 { 359 struct mvneta_softc *sc = (struct mvneta_softc *) self; 360 struct fdt_attach_args *faa = aux; 361 uint32_t ctl0, ctl2, panc; 362 struct ifnet *ifp; 363 int i, len, node; 364 char *phy_mode; 365 char *managed; 366 367 printf("\n"); 368 369 sc->sc_iot = faa->fa_iot; 370 timeout_set(&sc->sc_tick_ch, mvneta_tick, sc); 371 if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, 372 faa->fa_reg[0].size, 0, &sc->sc_ioh)) { 373 printf("%s: cannot map registers\n", self->dv_xname); 374 return; 375 } 
376 sc->sc_dmat = faa->fa_dmat; 377 378 clock_enable(faa->fa_node, NULL); 379 380 pinctrl_byname(faa->fa_node, "default"); 381 382 len = OF_getproplen(faa->fa_node, "phy-mode"); 383 if (len <= 0) { 384 printf("%s: cannot extract phy-mode\n", self->dv_xname); 385 return; 386 } 387 388 phy_mode = malloc(len, M_TEMP, M_WAITOK); 389 OF_getprop(faa->fa_node, "phy-mode", phy_mode, len); 390 if (!strncmp(phy_mode, "qsgmii", strlen("qsgmii"))) 391 sc->sc_phy_mode = PHY_MODE_QSGMII; 392 else if (!strncmp(phy_mode, "sgmii", strlen("sgmii"))) 393 sc->sc_phy_mode = PHY_MODE_SGMII; 394 else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id"))) 395 sc->sc_phy_mode = PHY_MODE_RGMII_ID; 396 else if (!strncmp(phy_mode, "rgmii", strlen("rgmii"))) 397 sc->sc_phy_mode = PHY_MODE_RGMII; 398 else { 399 printf("%s: cannot use phy-mode %s\n", self->dv_xname, 400 phy_mode); 401 return; 402 } 403 free(phy_mode, M_TEMP, len); 404 405 /* TODO: check child's name to be "fixed-link" */ 406 if (OF_getproplen(faa->fa_node, "fixed-link") >= 0 || 407 OF_child(faa->fa_node)) 408 sc->sc_fixed_link = 1; 409 410 if ((len = OF_getproplen(faa->fa_node, "managed")) >= 0) { 411 managed = malloc(len, M_TEMP, M_WAITOK); 412 OF_getprop(faa->fa_node, "managed", managed, len); 413 if (!strncmp(managed, "in-band-status", 414 strlen("in-band-status"))) { 415 sc->sc_fixed_link = 1; 416 sc->sc_inband_status = 1; 417 } 418 free(managed, M_TEMP, len); 419 } 420 421 if (!sc->sc_fixed_link) { 422 sc->sc_phy = OF_getpropint(faa->fa_node, "phy", 0); 423 node = OF_getnodebyphandle(sc->sc_phy); 424 if (!node) { 425 printf("%s: cannot find phy in fdt\n", self->dv_xname); 426 return; 427 } 428 429 if ((sc->sc_phyloc = OF_getpropint(node, "reg", -1)) == -1) { 430 printf("%s: cannot extract phy addr\n", self->dv_xname); 431 return; 432 } 433 } 434 435 mvneta_wininit(sc); 436 437 if (OF_getproplen(faa->fa_node, "local-mac-address") == 438 ETHER_ADDR_LEN) { 439 OF_getprop(faa->fa_node, "local-mac-address", 440 sc->sc_enaddr, 
ETHER_ADDR_LEN); 441 mvneta_enaddr_write(sc); 442 } else { 443 uint32_t maddrh, maddrl; 444 maddrh = MVNETA_READ(sc, MVNETA_MACAH); 445 maddrl = MVNETA_READ(sc, MVNETA_MACAL); 446 if (maddrh || maddrl) { 447 sc->sc_enaddr[0] = maddrh >> 24; 448 sc->sc_enaddr[1] = maddrh >> 16; 449 sc->sc_enaddr[2] = maddrh >> 8; 450 sc->sc_enaddr[3] = maddrh >> 0; 451 sc->sc_enaddr[4] = maddrl >> 8; 452 sc->sc_enaddr[5] = maddrl >> 0; 453 } else 454 ether_fakeaddr(&sc->sc_ac.ac_if); 455 } 456 457 sc->sc_sfp = OF_getpropint(faa->fa_node, "sfp", 0); 458 459 printf("%s: Ethernet address %s\n", self->dv_xname, 460 ether_sprintf(sc->sc_enaddr)); 461 462 /* disable port */ 463 MVNETA_WRITE(sc, MVNETA_PMACC0, 464 MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN); 465 delay(200); 466 467 /* clear all cause registers */ 468 MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0); 469 MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0); 470 MVNETA_WRITE(sc, MVNETA_PMIC, 0); 471 472 /* mask all interrupts */ 473 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY); 474 MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0); 475 MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG | 476 MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG); 477 MVNETA_WRITE(sc, MVNETA_PIE, 0); 478 479 /* enable MBUS Retry bit16 */ 480 MVNETA_WRITE(sc, MVNETA_ERETRY, 0x20); 481 482 /* enable access for CPU0 */ 483 MVNETA_WRITE(sc, MVNETA_PCP2Q(0), 484 MVNETA_PCP2Q_RXQAE_ALL | MVNETA_PCP2Q_TXQAE_ALL); 485 486 /* reset RX and TX DMAs */ 487 MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT); 488 MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT); 489 490 /* disable legacy WRR, disable EJP, release from reset */ 491 MVNETA_WRITE(sc, MVNETA_TQC_1, 0); 492 for (i = 0; i < MVNETA_TX_QUEUE_CNT; i++) { 493 MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(i), 0); 494 MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(i), 0); 495 } 496 497 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0); 498 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0); 499 500 /* set port acceleration mode */ 501 
MVNETA_WRITE(sc, MVNETA_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM); 502 503 MVNETA_WRITE(sc, MVNETA_PXC, MVNETA_PXC_AMNOTXES | MVNETA_PXC_RXCS); 504 MVNETA_WRITE(sc, MVNETA_PXCX, 0); 505 MVNETA_WRITE(sc, MVNETA_PMFS, 64); 506 507 /* Set SDC register except IPGINT bits */ 508 MVNETA_WRITE(sc, MVNETA_SDC, 509 MVNETA_SDC_RXBSZ_16_64BITWORDS | 510 MVNETA_SDC_BLMR | /* Big/Little Endian Receive Mode: No swap */ 511 MVNETA_SDC_BLMT | /* Big/Little Endian Transmit Mode: No swap */ 512 MVNETA_SDC_TXBSZ_16_64BITWORDS); 513 514 /* XXX: Disable PHY polling in hardware */ 515 MVNETA_WRITE(sc, MVNETA_EUC, 516 MVNETA_READ(sc, MVNETA_EUC) & ~MVNETA_EUC_POLLING); 517 518 /* clear uni-/multicast tables */ 519 uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT]; 520 memset(dfut, 0, sizeof(dfut)); 521 memset(dfsmt, 0, sizeof(dfut)); 522 memset(dfomt, 0, sizeof(dfut)); 523 MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT); 524 MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfut, MVNETA_NDFSMT); 525 MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfut, MVNETA_NDFOMT); 526 527 MVNETA_WRITE(sc, MVNETA_PIE, 528 MVNETA_PIE_RXPKTINTRPTENB_ALL | MVNETA_PIE_TXPKTINTRPTENB_ALL); 529 530 MVNETA_WRITE(sc, MVNETA_EUIC, 0); 531 532 /* Setup phy. */ 533 ctl0 = MVNETA_READ(sc, MVNETA_PMACC0); 534 ctl2 = MVNETA_READ(sc, MVNETA_PMACC2); 535 panc = MVNETA_READ(sc, MVNETA_PANC); 536 537 /* Force link down to change in-band settings. 
*/ 538 panc &= ~MVNETA_PANC_FORCELINKPASS; 539 panc |= MVNETA_PANC_FORCELINKFAIL; 540 MVNETA_WRITE(sc, MVNETA_PANC, panc); 541 542 ctl0 &= ~MVNETA_PMACC0_PORTTYPE; 543 ctl2 &= ~(MVNETA_PMACC2_PORTMACRESET | MVNETA_PMACC2_INBANDAN); 544 panc &= ~(MVNETA_PANC_INBANDANEN | MVNETA_PANC_INBANDRESTARTAN | 545 MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETGMIISPEED | 546 MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_SETFCEN | 547 MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN | 548 MVNETA_PANC_SETFULLDX | MVNETA_PANC_ANDUPLEXEN); 549 550 ctl2 |= MVNETA_PMACC2_RGMIIEN; 551 switch (sc->sc_phy_mode) { 552 case PHY_MODE_QSGMII: 553 MVNETA_WRITE(sc, MVNETA_SERDESCFG, 554 MVNETA_SERDESCFG_QSGMII_PROTO); 555 ctl2 |= MVNETA_PMACC2_PCSEN; 556 break; 557 case PHY_MODE_SGMII: 558 MVNETA_WRITE(sc, MVNETA_SERDESCFG, 559 MVNETA_SERDESCFG_SGMII_PROTO); 560 ctl2 |= MVNETA_PMACC2_PCSEN; 561 break; 562 default: 563 break; 564 } 565 566 /* Use Auto-Negotiation for Inband Status only */ 567 if (sc->sc_inband_status) { 568 panc &= ~(MVNETA_PANC_FORCELINKFAIL | 569 MVNETA_PANC_FORCELINKPASS); 570 /* TODO: read mode from SFP */ 571 if (1) { 572 /* 802.3z */ 573 ctl0 |= MVNETA_PMACC0_PORTTYPE; 574 panc |= (MVNETA_PANC_INBANDANEN | 575 MVNETA_PANC_SETGMIISPEED | 576 MVNETA_PANC_SETFULLDX); 577 } else { 578 /* SGMII */ 579 ctl2 |= MVNETA_PMACC2_INBANDAN; 580 panc |= (MVNETA_PANC_INBANDANEN | 581 MVNETA_PANC_ANSPEEDEN | 582 MVNETA_PANC_ANDUPLEXEN); 583 } 584 MVNETA_WRITE(sc, MVNETA_OMSCD, 585 MVNETA_READ(sc, MVNETA_OMSCD) | MVNETA_OMSCD_1MS_CLOCK_ENABLE); 586 } else { 587 MVNETA_WRITE(sc, MVNETA_OMSCD, 588 MVNETA_READ(sc, MVNETA_OMSCD) & ~MVNETA_OMSCD_1MS_CLOCK_ENABLE); 589 } 590 591 MVNETA_WRITE(sc, MVNETA_PMACC0, ctl0); 592 MVNETA_WRITE(sc, MVNETA_PMACC2, ctl2); 593 MVNETA_WRITE(sc, MVNETA_PANC, panc); 594 595 /* Port reset */ 596 while (MVNETA_READ(sc, MVNETA_PMACC2) & MVNETA_PMACC2_PORTMACRESET) 597 ; 598 599 fdt_intr_establish(faa->fa_node, IPL_NET, mvneta_intr, sc, 600 sc->sc_dev.dv_xname); 601 602 ifp = 
&sc->sc_ac.ac_if; 603 ifp->if_softc = sc; 604 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 605 ifp->if_start = mvneta_start; 606 ifp->if_ioctl = mvneta_ioctl; 607 ifp->if_watchdog = mvneta_watchdog; 608 ifp->if_capabilities = IFCAP_VLAN_MTU; 609 610 #if notyet 611 /* 612 * We can do IPv4/TCPv4/UDPv4 checksums in hardware. 613 */ 614 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 615 IFCAP_CSUM_UDPv4; 616 617 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 618 /* 619 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums. 620 */ 621 ifp->if_capabilities &= ~IFCAP_CSUM_TCPv4; 622 #endif 623 624 ifq_set_maxlen(&ifp->if_snd, max(MVNETA_TX_RING_CNT - 1, IFQ_MAXLEN)); 625 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname)); 626 627 /* 628 * Do MII setup. 629 */ 630 sc->sc_mii.mii_ifp = ifp; 631 sc->sc_mii.mii_readreg = mvneta_miibus_readreg; 632 sc->sc_mii.mii_writereg = mvneta_miibus_writereg; 633 sc->sc_mii.mii_statchg = mvneta_miibus_statchg; 634 635 ifmedia_init(&sc->sc_mii.mii_media, 0, 636 mvneta_mediachange, mvneta_mediastatus); 637 638 config_defer(self, mvneta_attach_deferred); 639 } 640 641 void 642 mvneta_attach_deferred(struct device *self) 643 { 644 struct mvneta_softc *sc = (struct mvneta_softc *) self; 645 struct ifnet *ifp = &sc->sc_ac.ac_if; 646 647 if (!sc->sc_fixed_link) { 648 sc->sc_mdio = mii_byphandle(sc->sc_phy); 649 if (sc->sc_mdio == NULL) { 650 printf("%s: mdio bus not yet attached\n", self->dv_xname); 651 return; 652 } 653 654 mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc, 655 MII_OFFSET_ANY, 0); 656 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 657 printf("%s: no PHY found!\n", self->dv_xname); 658 ifmedia_add(&sc->sc_mii.mii_media, 659 IFM_ETHER|IFM_MANUAL, 0, NULL); 660 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); 661 } else 662 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 663 } else { 664 ifmedia_add(&sc->sc_mii.mii_media, 665 IFM_ETHER|IFM_MANUAL, 
0, NULL); 666 ifmedia_set(&sc->sc_mii.mii_media, 667 IFM_ETHER|IFM_MANUAL); 668 669 if (sc->sc_inband_status) { 670 mvneta_inband_statchg(sc); 671 } else { 672 sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE; 673 sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX; 674 mvneta_miibus_statchg(self); 675 } 676 677 ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active); 678 ifp->if_link_state = LINK_STATE_FULL_DUPLEX; 679 } 680 681 /* 682 * Call MI attach routines. 683 */ 684 if_attach(ifp); 685 ether_ifattach(ifp); 686 } 687 688 void 689 mvneta_tick(void *arg) 690 { 691 struct mvneta_softc *sc = arg; 692 struct mii_data *mii = &sc->sc_mii; 693 int s; 694 695 s = splnet(); 696 mii_tick(mii); 697 splx(s); 698 699 timeout_add_sec(&sc->sc_tick_ch, 1); 700 } 701 702 int 703 mvneta_intr(void *arg) 704 { 705 struct mvneta_softc *sc = arg; 706 struct ifnet *ifp = &sc->sc_ac.ac_if; 707 uint32_t ic, misc; 708 709 ic = MVNETA_READ(sc, MVNETA_PRXTXTIC); 710 711 if (ic & MVNETA_PRXTXTI_PMISCICSUMMARY) { 712 misc = MVNETA_READ(sc, MVNETA_PMIC); 713 MVNETA_WRITE(sc, MVNETA_PMIC, 0); 714 if (sc->sc_inband_status && (misc & 715 (MVNETA_PMI_PHYSTATUSCHNG | 716 MVNETA_PMI_LINKCHANGE | 717 MVNETA_PMI_PSCSYNCCHNG))) { 718 mvneta_inband_statchg(sc); 719 } 720 } 721 722 if (!(ifp->if_flags & IFF_RUNNING)) 723 return 1; 724 725 if (ic & MVNETA_PRXTXTI_TBTCQ(0)) 726 mvneta_tx_proc(sc); 727 728 if (ic & MVNETA_PRXTXTI_RBICTAPQ(0)) 729 mvneta_rx_proc(sc); 730 731 if (!ifq_empty(&ifp->if_snd)) 732 mvneta_start(ifp); 733 734 return 1; 735 } 736 737 void 738 mvneta_start(struct ifnet *ifp) 739 { 740 struct mvneta_softc *sc = ifp->if_softc; 741 struct mbuf *m_head = NULL; 742 int idx; 743 744 DPRINTFN(3, ("mvneta_start (idx %d)\n", sc->sc_tx_prod)); 745 746 if (!(ifp->if_flags & IFF_RUNNING)) 747 return; 748 if (ifq_is_oactive(&ifp->if_snd)) 749 return; 750 if (ifq_empty(&ifp->if_snd)) 751 return; 752 753 /* If Link is DOWN, can't start TX */ 754 if (!MVNETA_IS_LINKUP(sc)) 755 
		return;

	/* Pick up the chip's view of the ring before touching descriptors. */
	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_tx_prod;
	while (sc->sc_tx_cnt < MVNETA_TX_RING_CNT) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (mvneta_encap(sc, m_head, &idx)) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

/*
 * Handle interface ioctls: up/down, media, rx-ring info and SFP
 * module EEPROM access; everything else goes to ether_ioctl().
 */
int
mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvneta_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvneta_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvneta_down(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvneta_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;
	case SIOCGIFSFFPAGE:
		/* Serialize i2c access to the SFP cage. */
		error = rw_enter(&mvneta_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvneta_sff_lock);
		break;
	default:
		DPRINTFN(2, ("mvneta_ioctl ETHER\n"));
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		/* Reprogram the multicast filter; no full restart needed. */
		if (ifp->if_flags & IFF_RUNNING)
			mvneta_iff(sc);
		error = 0;
	}

	splx(s);

	return error;
}

/*
 * React to a link state transition: force the MAC link up/down (unless
 * in-band status drives it) and enable the queues on link up.
 */
void
mvneta_port_change(struct mvneta_softc *sc)
{
	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) != sc->sc_link) {
		sc->sc_link = !sc->sc_link;

		if (sc->sc_link) {
			if (!sc->sc_inband_status) {
				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
				panc &= ~MVNETA_PANC_FORCELINKFAIL;
				panc |= MVNETA_PANC_FORCELINKPASS;
				MVNETA_WRITE(sc, MVNETA_PANC, panc);
			}
			mvneta_port_up(sc);
		} else {
			if (!sc->sc_inband_status) {
				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
				panc &= ~MVNETA_PANC_FORCELINKPASS;
				panc |= MVNETA_PANC_FORCELINKFAIL;
				MVNETA_WRITE(sc, MVNETA_PANC, panc);
			}
		}
	}
}

void
mvneta_port_up(struct mvneta_softc *sc)
{
	/* Enable port RX/TX. */
	MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_ENQ(0));
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(0));
}

/*
 * Bring the interface up: allocate and program the tx/rx descriptor
 * rings, enable the port and unmask interrupts.
 */
int
mvneta_up(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_buf *txb, *rxb;
	int i;

	DPRINTFN(2, ("mvneta_up\n"));

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = mvneta_dmamem_alloc(sc,
	    MVNETA_TX_RING_CNT * sizeof(struct mvneta_tx_desc), 32);
	sc->sc_txdesc = MVNETA_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_TX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVNETA_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = mvneta_dmamem_alloc(sc,
	    MVNETA_RX_RING_CNT * sizeof(struct mvneta_rx_desc), 32);
	sc->sc_rxdesc = MVNETA_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_RX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	/* Set Rx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(0), MVNETA_DMA_DVA(sc->sc_rxring));
	/* NOTE(review): bits 19+ encode the buffer size in 8-byte units. */
	MVNETA_WRITE(sc, MVNETA_PRXDQS(0), MVNETA_RX_RING_CNT |
	    ((MCLBYTES >> 3) << 19));
	MVNETA_WRITE(sc, MVNETA_PRXDQTH(0), 0);
	MVNETA_WRITE(sc, MVNETA_PRXC(0), 0);

	/* Set Tx queue bandwidth. */
	MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(0), 0x03ffffff);
	MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(0), 0x03ffffff);

	/* Set Tx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(0), MVNETA_DMA_DVA(sc->sc_txring));
	MVNETA_WRITE(sc, MVNETA_PTXDQS(0),
	    MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT));

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	if_rxr_init(&sc->sc_rx_ring, 2, MVNETA_RX_RING_CNT);
	mvneta_fill_rx_ring(sc);

	/* TODO: correct frame size */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    (MVNETA_READ(sc, MVNETA_PMACC0) & MVNETA_PMACC0_PORTTYPE) |
	    MVNETA_PMACC0_FRAMESIZELIMIT(MCLBYTES - MVNETA_HWHEADER_SIZE));

	/* set max MTU */
	MVNETA_WRITE(sc, MVNETA_TXMTU, MVNETA_TXMTU_MAX);
	MVNETA_WRITE(sc, MVNETA_TXTKSIZE, 0xffffffff);
	MVNETA_WRITE(sc, MVNETA_TXQTKSIZE(0), 0x7fffffff);

	/* enable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) | MVNETA_PMACC0_PORTEN);

	mvneta_enaddr_write(sc);

	/* Program promiscuous mode and multicast filters. */
	mvneta_iff(sc);

	if (!sc->sc_fixed_link)
		mii_mediachg(&sc->sc_mii);

	if (sc->sc_link)
		mvneta_port_up(sc);

	/* Enable interrupt masks */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_RBICTAPQ(0) |
	    MVNETA_PRXTXTI_TBTCQ(0) | MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

/*
 * Bring the interface down: stop the queues, drain the tx FIFO,
 * disable the port, free all ring resources and reset the DMAs.
 */
void
mvneta_down(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg, txinprog, txfifoemp;
	struct mvneta_buf *txb, *rxb;
	int i, cnt;

	DPRINTFN(2, ("mvneta_down\n"));

	timeout_del(&sc->sc_tick_ch);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVNETA_READ(sc, MVNETA_RQC);
	if (reg & MVNETA_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_ENQ(0))
		MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_DISQ(0));

	txinprog = MVNETA_PS_TXINPROG_(0);
	txfifoemp = MVNETA_PS_TXFIFOEMP_(0);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			printf("%s: timeout for RX stopped. rqc 0x%x\n",
			    sc->sc_dev.dv_xname, reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				printf("%s: timeout for TX FIFO empty. status "
				    "0x%x\n", sc->sc_dev.dv_xname, reg);
				break;
			}
			cnt++;

			reg = MVNETA_READ(sc, MVNETA_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVNETA_READ(sc, MVNETA_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			printf("%s: TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", sc->sc_dev.dv_xname,
			    cnt, reg);
	}

	delay(200);

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

/*
 * Watchdog: the tx ring did not drain within if_timer seconds.
 */
void
mvneta_watchdog(struct ifnet *ifp)
{
	struct mvneta_softc *sc = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	mvneta_tx_proc(sc);
	if (sc->sc_tx_cnt != 0) {
		printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

		ifp->if_oerrors++;
	}
}

/*
 * Set media options.
 */
int
mvneta_mediachange(struct ifnet *ifp)
{
	struct mvneta_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

/*
 * Report current media status.
 */
void
mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mvneta_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}

	/*
	 * NOTE(review): for fixed-link, sc_mii appears to hold static
	 * media/status values populated elsewhere — confirm at attach.
	 */
	if (sc->sc_fixed_link) {
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

/*
 * Load mbuf m into the transmit ring at *idx.  Returns ENOBUFS when the
 * mbuf cannot be DMA-mapped or the ring lacks room; on success writes
 * one descriptor per DMA segment, advances *idx past them and tells the
 * hardware how many descriptors were added.
 */
int
mvneta_encap(struct mvneta_softc *sc, struct mbuf *m, uint32_t *idx)
{
	struct mvneta_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t cmdsts;
	int i, current, first, last;

	DPRINTFN(3, ("mvneta_encap\n"));

	first = last = current = *idx;
	map = sc->sc_txbuf[current].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/* keep a couple of slots in reserve (see KASSERT in the loop) */
	if (map->dm_nsegs > (MVNETA_TX_RING_CNT - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	DPRINTFN(2, ("mvneta_encap: dm_nsegs=%d\n", map->dm_nsegs));

	cmdsts = MVNETA_TX_L4_CSUM_NOT;
#if notyet
	int m_csumflags;
	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVNETA_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVNETA_TX_IP_NO_FRAG |
		    MVNETA_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &sc->sc_txdesc[current];
		memset(txd, 0, sizeof(*txd));
		txd->bufptr = map->dm_segs[i].ds_addr;
		txd->bytecnt = map->dm_segs[i].ds_len;
		txd->cmdsts = cmdsts |
		    MVNETA_TX_ZERO_PADDING;
		if (i == 0)
			txd->cmdsts |= MVNETA_TX_FIRST_DESC;
		if (i == (map->dm_nsegs - 1))
			txd->cmdsts |= MVNETA_TX_LAST_DESC;

		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = MVNETA_TX_RING_NEXT(current);
		KASSERT(current != sc->sc_tx_cons);
	}

	/*
	 * Park the packet's map on the LAST slot (freed when that slot
	 * completes) and recycle the last slot's idle map to the first.
	 */
	KASSERT(sc->sc_txbuf[last].tb_m == NULL);
	sc->sc_txbuf[first].tb_map = sc->sc_txbuf[last].tb_map;
	sc->sc_txbuf[last].tb_map = map;
	sc->sc_txbuf[last].tb_m = m;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = current;

	/* Tell the hardware how many new descriptors it owns. */
	MVNETA_WRITE(sc, MVNETA_PTXSU(0), map->dm_nsegs);

	DPRINTFN(3, ("mvneta_encap: completed successfully\n"));

	return 0;
}

/*
 * Harvest completed receive descriptors and hand the packets to the
 * network stack.
 */
void
mvneta_rx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint32_t rxstat;
	int i, idx, len, ready;

	DPRINTFN(3, ("%s: %d\n", __func__, sc->sc_rx_cons));

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring), 0,
	    MVNETA_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* number of occupied descriptors; acknowledge them all at once */
	ready = MVNETA_PRXS_ODC(MVNETA_READ(sc, MVNETA_PRXS(0)));
	MVNETA_WRITE(sc, MVNETA_PRXSU(0), ready);

	for (i = 0; i < ready; i++) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < MVNETA_RX_RING_CNT);

		rxd = &sc->sc_rxdesc[idx];

#ifdef DIAGNOSTIC
		/* every frame must fit in one cluster (FIRST and LAST set) */
		if ((rxd->cmdsts &
		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC)) !=
		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC))
			panic("%s: buffer size is smaller than packet",
			    __func__);
#endif

		len = rxd->bytecnt;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		m = rxb->tb_m;
		rxb->tb_m = NULL;
		m->m_pkthdr.len = m->m_len = len;

		rxstat = rxd->cmdsts;
		if (rxstat & MVNETA_ERROR_SUMMARY) {
#if 0
			int err = rxstat & MVNETA_RX_ERROR_CODE_MASK;

			if (err == MVNETA_RX_CRC_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_OVERRUN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_MAX_FRAME_LEN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_RESOURCE_ERROR)
				ifp->if_ierrors++;
#else
			ifp->if_ierrors++;
#endif
panic("%s: handle input errors", __func__); 1318 continue; 1319 } 1320 1321 #if notyet 1322 if (rxstat & MVNETA_RX_IP_FRAME_TYPE) { 1323 int flgs = 0; 1324 1325 /* Check IPv4 header checksum */ 1326 flgs |= M_CSUM_IPv4; 1327 if (!(rxstat & MVNETA_RX_IP_HEADER_OK)) 1328 flgs |= M_CSUM_IPv4_BAD; 1329 else if ((bufsize & MVNETA_RX_IP_FRAGMENT) == 0) { 1330 /* 1331 * Check TCPv4/UDPv4 checksum for 1332 * non-fragmented packet only. 1333 * 1334 * It seemd that sometimes 1335 * MVNETA_RX_L4_CHECKSUM_OK bit was set to 0 1336 * even if the checksum is correct and the 1337 * packet was not fragmented. So we don't set 1338 * M_CSUM_TCP_UDP_BAD even if csum bit is 0. 1339 */ 1340 1341 if (((rxstat & MVNETA_RX_L4_TYPE_MASK) == 1342 MVNETA_RX_L4_TYPE_TCP) && 1343 ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0)) 1344 flgs |= M_CSUM_TCPv4; 1345 else if (((rxstat & MVNETA_RX_L4_TYPE_MASK) == 1346 MVNETA_RX_L4_TYPE_UDP) && 1347 ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0)) 1348 flgs |= M_CSUM_UDPv4; 1349 } 1350 m->m_pkthdr.csum_flags = flgs; 1351 } 1352 #endif 1353 1354 /* Skip on first 2byte (HW header) */ 1355 m_adj(m, MVNETA_HWHEADER_SIZE); 1356 1357 ml_enqueue(&ml, m); 1358 1359 if_rxr_put(&sc->sc_rx_ring, 1); 1360 1361 sc->sc_rx_cons = MVNETA_RX_RING_NEXT(idx); 1362 } 1363 1364 if (ifiq_input(&ifp->if_rcv, &ml)) 1365 if_rxr_livelocked(&sc->sc_rx_ring); 1366 1367 mvneta_fill_rx_ring(sc); 1368 } 1369 1370 void 1371 mvneta_tx_proc(struct mvneta_softc *sc) 1372 { 1373 struct ifnet *ifp = &sc->sc_ac.ac_if; 1374 struct mvneta_tx_desc *txd; 1375 struct mvneta_buf *txb; 1376 int i, idx, sent; 1377 1378 DPRINTFN(3, ("%s\n", __func__)); 1379 1380 if (!(ifp->if_flags & IFF_RUNNING)) 1381 return; 1382 1383 bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0, 1384 MVNETA_DMA_LEN(sc->sc_txring), 1385 BUS_DMASYNC_POSTREAD); 1386 1387 sent = MVNETA_PTXS_TBC(MVNETA_READ(sc, MVNETA_PTXS(0))); 1388 MVNETA_WRITE(sc, MVNETA_PTXSU(0), MVNETA_PTXSU_NORB(sent)); 1389 1390 for (i = 0; i < sent; 
i++) { 1391 idx = sc->sc_tx_cons; 1392 KASSERT(idx < MVNETA_TX_RING_CNT); 1393 1394 txd = &sc->sc_txdesc[idx]; 1395 txb = &sc->sc_txbuf[idx]; 1396 if (txb->tb_m) { 1397 bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0, 1398 txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1399 bus_dmamap_unload(sc->sc_dmat, txb->tb_map); 1400 1401 m_freem(txb->tb_m); 1402 txb->tb_m = NULL; 1403 } 1404 1405 ifq_clr_oactive(&ifp->if_snd); 1406 1407 sc->sc_tx_cnt--; 1408 1409 if (txd->cmdsts & MVNETA_ERROR_SUMMARY) { 1410 int err = txd->cmdsts & MVNETA_TX_ERROR_CODE_MASK; 1411 1412 if (err == MVNETA_TX_LATE_COLLISION_ERROR) 1413 ifp->if_collisions++; 1414 if (err == MVNETA_TX_UNDERRUN_ERROR) 1415 ifp->if_oerrors++; 1416 if (err == MVNETA_TX_EXCESSIVE_COLLISION_ERRO) 1417 ifp->if_collisions++; 1418 } 1419 1420 sc->sc_tx_cons = MVNETA_TX_RING_NEXT(sc->sc_tx_cons); 1421 } 1422 1423 if (sc->sc_tx_cnt == 0) 1424 ifp->if_timer = 0; 1425 } 1426 1427 uint8_t 1428 mvneta_crc8(const uint8_t *data, size_t size) 1429 { 1430 int bit; 1431 uint8_t byte; 1432 uint8_t crc = 0; 1433 const uint8_t poly = 0x07; 1434 1435 while(size--) 1436 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--) 1437 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? 
	      poly : 0);

	return crc;
}

/* Both multicast filter tables must have the same number of rows. */
CTASSERT(MVNETA_NDFSMT == MVNETA_NDFOMT);

/*
 * Program the receive filter: promiscuous/allmulti mode bits and the
 * unicast / special-multicast / other-multicast destination address
 * filter tables.
 */
void
mvneta_iff(struct mvneta_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP | MVNETA_PXC_UPM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			pxc |= MVNETA_PXC_UPM;
		/* accept all multicast: pass all four entries of every row */
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* chip handles some IPv4 multicast specially */
			if (memcmp(enm->enm_addrlo, special, 5) == 0) {
				/* 01:00:5e:00:00:xx is indexed by its last byte */
				i = enm->enm_addrlo[5];
				dfsmt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			} else {
				/* everything else hashes through CRC8 */
				i = mvneta_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
				dfomt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfomt, MVNETA_NDFOMT);
}

/*
 * Allocate "size" bytes of zeroed, DMA-loaded, kernel-mapped memory in
 * a single segment with the requested alignment.  Returns NULL on
 * failure; cleanup of partially acquired resources is done via the
 * cascading goto labels.
 */
struct mvneta_dmamem *
mvneta_dmamem_alloc(struct mvneta_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvneta_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, 0);

	return (NULL);
}

/*
 * Release everything mvneta_dmamem_alloc() acquired, in reverse order.
 */
void
mvneta_dmamem_free(struct mvneta_softc *sc, struct mvneta_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, 0);
}

/*
 * Allocate one receive cluster and load it into "map", synced for the
 * device to write into.  Returns NULL if no cluster is available or the
 * DMA load fails.
 */
struct mbuf *
mvneta_alloc_mbuf(struct mvneta_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if
	    (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map", sc->sc_dev.dv_xname);
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}

/*
 * Top up the receive ring with fresh clusters, up to the if_rxr limit,
 * handing each new descriptor to the hardware as it is written.  Stops
 * early if cluster allocation fails; unused slots are returned to the
 * rxr accounting.
 */
void
mvneta_fill_rx_ring(struct mvneta_softc *sc)
{
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, MVNETA_RX_RING_CNT);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = mvneta_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		memset(rxd, 0, sizeof(*rxd));
		rxd->bufptr = rxb->tb_map->dm_segs[0].ds_addr;

		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
		    sc->sc_rx_prod * sizeof(*rxd), sizeof(*rxd),
		    BUS_DMASYNC_PREWRITE);

		sc->sc_rx_prod = MVNETA_RX_RING_NEXT(sc->sc_rx_prod);

		/* Tell the hardware there is a new free descriptor. */
		MVNETA_WRITE(sc, MVNETA_PRXSU(0),
		    MVNETA_PRXSU_NOOFNEWDESCRIPTORS(1));
	}

	if_rxr_put(&sc->sc_rx_ring, slots);
}