/*	$NetBSD: if_mvgbe.c,v 1.21 2012/10/02 15:22:46 msaitoh Exp $	*/
/*
 * Copyright (c) 2007, 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mvgbe.c,v 1.21 2012/10/02 15:22:46 msaitoh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/sockio.h>

#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvgbereg.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <net/bpf.h>
#include <sys/rnd.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "locators.h"

/* #define MVGBE_DEBUG 3 */
#ifdef MVGBE_DEBUG
#define DPRINTF(x)	if (mvgbe_debug) printf x
#define DPRINTFN(n,x)	if (mvgbe_debug >= (n)) printf x
int mvgbe_debug = MVGBE_DEBUG;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif


#define MVGBE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVGBE_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define MVGBE_READ_FILTER(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))
#define MVGBE_WRITE_FILTER(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))

#define MVGBE_TX_RING_CNT	256
#define MVGBE_TX_RING_MSK	(MVGBE_TX_RING_CNT - 1)
#define MVGBE_TX_RING_NEXT(x)	(((x) + 1) & MVGBE_TX_RING_MSK)
#define MVGBE_RX_RING_CNT	256
#define MVGBE_RX_RING_MSK	(MVGBE_RX_RING_CNT - 1)
#define MVGBE_RX_RING_NEXT(x)	(((x) + 1) & MVGBE_RX_RING_MSK)

CTASSERT(MVGBE_TX_RING_CNT > 1 && MVGBE_TX_RING_NEXT(MVGBE_TX_RING_CNT) ==
	(MVGBE_TX_RING_CNT + 1) % MVGBE_TX_RING_CNT);
CTASSERT(MVGBE_RX_RING_CNT > 1 && MVGBE_RX_RING_NEXT(MVGBE_RX_RING_CNT) ==
	(MVGBE_RX_RING_CNT + 1) % MVGBE_RX_RING_CNT);

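/*
 * Jumbo receive buffers: one contiguous DMA allocation is carved into
 * MVGBE_JSLOTS slots of MVGBE_JLEN bytes (the MRU rounded up to the RX
 * buffer alignment); MVGBE_RESID pads MVGBE_JMEM up to a whole number
 * of pages.
 */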
#define MVGBE_JSLOTS		384	/* XXXX */
#define MVGBE_JLEN		((MVGBE_MRU + MVGBE_RXBUF_ALIGN)&~MVGBE_RXBUF_MASK)
#define MVGBE_NTXSEG		30
#define MVGBE_JPAGESZ		PAGE_SIZE
#define MVGBE_RESID \
    (MVGBE_JPAGESZ - (MVGBE_JLEN * MVGBE_JSLOTS) % MVGBE_JPAGESZ)
#define MVGBE_JMEM \
    ((MVGBE_JLEN * MVGBE_JSLOTS) + MVGBE_RESID)

#define MVGBE_TX_RING_ADDR(sc, i) \
    ((sc)->sc_ring_map->dm_segs[0].ds_addr + \
	offsetof(struct mvgbe_ring_data, mvgbe_tx_ring[(i)]))

#define MVGBE_RX_RING_ADDR(sc, i) \
    ((sc)->sc_ring_map->dm_segs[0].ds_addr + \
	offsetof(struct mvgbe_ring_data, mvgbe_rx_ring[(i)]))

#define MVGBE_CDOFF(x)		offsetof(struct mvgbe_ring_data, x)
#define MVGBE_CDTXOFF(x)	MVGBE_CDOFF(mvgbe_tx_ring[(x)])
#define MVGBE_CDRXOFF(x)	MVGBE_CDOFF(mvgbe_rx_ring[(x)])

#define MVGBE_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
	const int __descsize = sizeof(struct mvgbe_tx_desc); \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > MVGBE_TX_RING_CNT) { \
		bus_dmamap_sync((sc)->sc_dmat, \
		    (sc)->sc_ring_map, MVGBE_CDTXOFF(__x), \
		    __descsize * (MVGBE_TX_RING_CNT - __x), (ops)); \
		__n -= (MVGBE_TX_RING_CNT - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \
	    MVGBE_CDTXOFF((__x)), __descsize * __n, (ops)); \
} while (0 /*CONSTCOND*/)

#define MVGBE_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \
	    MVGBE_CDRXOFF((x)), sizeof(struct mvgbe_rx_desc), (ops)); \
} while (/*CONSTCOND*/0)


struct mvgbe_jpool_entry {
	int slot;
	LIST_ENTRY(mvgbe_jpool_entry) jpool_entries;
};

struct mvgbe_chain {
	void *mvgbe_desc;
	struct mbuf *mvgbe_mbuf;
	struct mvgbe_chain *mvgbe_next;
};

struct mvgbe_txmap_entry {
	bus_dmamap_t dmamap;
	SIMPLEQ_ENTRY(mvgbe_txmap_entry) link;
};

struct mvgbe_chain_data {
	struct mvgbe_chain mvgbe_tx_chain[MVGBE_TX_RING_CNT];
	struct mvgbe_txmap_entry *mvgbe_tx_map[MVGBE_TX_RING_CNT];
	int mvgbe_tx_prod;
	int mvgbe_tx_cons;
	int mvgbe_tx_cnt;

	struct mvgbe_chain mvgbe_rx_chain[MVGBE_RX_RING_CNT];
	bus_dmamap_t mvgbe_rx_map[MVGBE_RX_RING_CNT];
	bus_dmamap_t mvgbe_rx_jumbo_map;
	int mvgbe_rx_prod;
	int mvgbe_rx_cons;
	int mvgbe_rx_cnt;

	/* Stick the jumbo mem management stuff here too. */
	void *mvgbe_jslots[MVGBE_JSLOTS];
	void *mvgbe_jumbo_buf;
};

struct mvgbe_ring_data {
	struct mvgbe_tx_desc mvgbe_tx_ring[MVGBE_TX_RING_CNT];
	struct mvgbe_rx_desc mvgbe_rx_ring[MVGBE_RX_RING_CNT];
};

struct mvgbec_softc {
	device_t sc_dev;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	kmutex_t sc_mtx;

	int sc_flags;
};

struct mvgbe_softc {
	device_t sc_dev;
	int sc_port;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_space_handle_t sc_dafh;	/* dest address filter handle */
	bus_dma_tag_t sc_dmat;

	struct ethercom sc_ethercom;
	struct mii_data sc_mii;
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];	/* station addr */

	struct mvgbe_chain_data sc_cdata;
	struct mvgbe_ring_data *sc_rdata;
	bus_dmamap_t sc_ring_map;
	int sc_if_flags;
	int sc_wdogsoft;

	LIST_HEAD(__mvgbe_jfreehead, mvgbe_jpool_entry) sc_jfree_listhead;
	LIST_HEAD(__mvgbe_jinusehead, mvgbe_jpool_entry) sc_jinuse_listhead;
	SIMPLEQ_HEAD(__mvgbe_txmaphead, mvgbe_txmap_entry) sc_txmap_head;

	krndsource_t sc_rnd_source;
};


/* Gigabit Ethernet Unit Global part functions */

static int mvgbec_match(device_t, struct cfdata *, void *);
static void mvgbec_attach(device_t, device_t, void *);

static int mvgbec_print(void *, const char *);
static int mvgbec_search(device_t, cfdata_t, const int *, void *);

/* MII functions */
static int mvgbec_miibus_readreg(device_t, int, int);
static void mvgbec_miibus_writereg(device_t, int, int, int);
static void mvgbec_miibus_statchg(struct ifnet *);

static void mvgbec_wininit(struct mvgbec_softc *);

/* Gigabit Ethernet Port part functions */

static int mvgbe_match(device_t, struct cfdata *, void *);
static void mvgbe_attach(device_t, device_t, void *);

static int mvgbe_intr(void *);

static void mvgbe_start(struct ifnet *);
static int mvgbe_ioctl(struct ifnet *, u_long, void *);
static int mvgbe_init(struct ifnet *);
static void mvgbe_stop(struct ifnet *, int);
static void mvgbe_watchdog(struct ifnet *);

static int mvgbe_ifflags_cb(struct ethercom *);

static int mvgbe_mediachange(struct ifnet *);
static void mvgbe_mediastatus(struct ifnet *, struct ifmediareq *);

static int mvgbe_init_rx_ring(struct mvgbe_softc *);
static int mvgbe_init_tx_ring(struct mvgbe_softc *);
static int mvgbe_newbuf(struct mvgbe_softc *, int, struct mbuf *, bus_dmamap_t);
static int mvgbe_alloc_jumbo_mem(struct mvgbe_softc *);
static void *mvgbe_jalloc(struct mvgbe_softc *);
static void mvgbe_jfree(struct mbuf *, void *, size_t, void *);
static int mvgbe_encap(struct mvgbe_softc *, struct mbuf *, uint32_t *);
static void mvgbe_rxeof(struct mvgbe_softc *);
static void mvgbe_txeof(struct mvgbe_softc *);
static uint8_t mvgbe_crc8(const uint8_t *, size_t);
static void mvgbe_filter_setup(struct mvgbe_softc *);
#ifdef MVGBE_DEBUG
static void mvgbe_dump_txdesc(struct mvgbe_tx_desc *, int);
#endif

CFATTACH_DECL_NEW(mvgbec_gt, sizeof(struct mvgbec_softc),
    mvgbec_match, mvgbec_attach, NULL, NULL);
CFATTACH_DECL_NEW(mvgbec_mbus, sizeof(struct mvgbec_softc),
    mvgbec_match, mvgbec_attach, NULL, NULL);

CFATTACH_DECL_NEW(mvgbe, sizeof(struct mvgbe_softc),
    mvgbe_match, mvgbe_attach, NULL, NULL);

device_t mvgbec0 = NULL;

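/*
 * Known SoC models and their GbE ports: device model, port unit,
 * number of ports behind this controller, per-port interrupt numbers
 * and chip-specific quirk flags (FLAGS_FIX_TQTB, FLAGS_FIX_MTU).
 */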
struct mvgbe_port {
	int model;
	int unit;
	int ports;
	int irqs[3];
	int flags;
#define FLAGS_FIX_TQTB	(1 << 0)
#define FLAGS_FIX_MTU	(1 << 1)
} mvgbe_ports[] = {
	{ MARVELL_DISCOVERY_II,		0, 3, { 32, 33, 34 }, 0 },
	{ MARVELL_DISCOVERY_III,	0, 3, { 32, 33, 34 }, 0 },
#if 0
	{ MARVELL_DISCOVERY_LT,		0, ?, { }, 0 },
	{ MARVELL_DISCOVERY_V,		0, ?, { }, 0 },
	{ MARVELL_DISCOVERY_VI,		0, ?, { }, 0 },
#endif
	{ MARVELL_ORION_1_88F5082,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88F5180N,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88F5181,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88F5182,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_2_88F5281,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88F6082,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88W8660,	0, 1, { 21 }, FLAGS_FIX_MTU },

	{ MARVELL_KIRKWOOD_88F6180,	0, 1, { 11 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6192,	0, 1, { 11 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6192,	1, 1, { 15 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6281,	0, 1, { 11 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6281,	1, 1, { 15 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6282,	0, 1, { 11 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6282,	1, 1, { 15 }, FLAGS_FIX_TQTB },

	{ MARVELL_MV78XX0_MV78100,	0, 1, { 40 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78100,	1, 1, { 44 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78200,	0, 1, { 40 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78200,	1, 1, { 44 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78200,	2, 1, { 48 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78200,	3, 1, { 52 }, FLAGS_FIX_TQTB },
};


/* ARGSUSED */
static int
mvgbec_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	int i;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	for (i = 0; i < __arraycount(mvgbe_ports); i++)
		if (mva->mva_model == mvgbe_ports[i].model) {
			mva->mva_size = MVGBE_SIZE;
			return 1;
		}
	return 0;
}

/* ARGSUSED */
static void
mvgbec_attach(device_t parent, device_t self, void *aux)
{
	struct mvgbec_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux, gbea;
	struct mvgbe_softc *port;
	struct mii_softc *mii;
	device_t child;
	uint32_t phyaddr;
	int i, j;

	aprint_naive("\n");
	aprint_normal(": Marvell Gigabit Ethernet Controller\n");

	sc->sc_dev = self;
	sc->sc_iot = mva->mva_iot;
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
	    mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}

	if (mvgbec0 == NULL)
		mvgbec0 = self;

	phyaddr = 0;
	MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr);

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);

	/* Disable and clear Gigabit Ethernet Unit interrupts */
	MVGBE_WRITE(sc, MVGBE_EUIM, 0);
	MVGBE_WRITE(sc, MVGBE_EUIC, 0);

	mvgbec_wininit(sc);

	memset(&gbea, 0, sizeof(gbea));
	for (i = 0; i < __arraycount(mvgbe_ports); i++) {
		if (mvgbe_ports[i].model != mva->mva_model ||
		    mvgbe_ports[i].unit != mva->mva_unit)
			continue;

		sc->sc_flags = mvgbe_ports[i].flags;

		for (j = 0; j < mvgbe_ports[i].ports; j++) {
			gbea.mva_name = "mvgbe";
			gbea.mva_model = mva->mva_model;
			gbea.mva_iot = sc->sc_iot;
			gbea.mva_ioh = sc->sc_ioh;
			gbea.mva_unit = j;
			gbea.mva_dmat = mva->mva_dmat;
			gbea.mva_irq = mvgbe_ports[i].irqs[j];
			child = config_found_sm_loc(sc->sc_dev, "mvgbec", NULL,
			    &gbea, mvgbec_print, mvgbec_search);
			if (child) {
				port = device_private(child);
				mii = LIST_FIRST(&port->sc_mii.mii_phys);
				phyaddr |= MVGBE_PHYADDR_PHYAD(j, mii->mii_phy);
			}
		}
		break;
	}
	MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr);
}

static int
mvgbec_print(void *aux, const char *pnp)
{
	struct marvell_attach_args *gbea = aux;

	if (pnp)
		aprint_normal("%s at %s port %d",
		    gbea->mva_name, pnp, gbea->mva_unit);
	else {
		if (gbea->mva_unit != MVGBECCF_PORT_DEFAULT)
			aprint_normal(" port %d", gbea->mva_unit);
		if (gbea->mva_irq != MVGBECCF_IRQ_DEFAULT)
			aprint_normal(" irq %d", gbea->mva_irq);
	}
	return UNCONF;
}

/* ARGSUSED */
static int
mvgbec_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	struct marvell_attach_args *gbea = aux;

	if (cf->cf_loc[MVGBECCF_PORT] == gbea->mva_unit &&
	    cf->cf_loc[MVGBECCF_IRQ] != MVGBECCF_IRQ_DEFAULT)
		gbea->mva_irq = cf->cf_loc[MVGBECCF_IRQ];

	return config_match(parent, cf, aux);
}

static int
mvgbec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mvgbe_softc *sc = device_private(dev);
	struct mvgbec_softc *csc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi, val;
	int i;

	if (mvgbec0 == NULL) {
		aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n");
		return -1;
	}
	csc = device_private(mvgbec0);

	mutex_enter(&csc->sc_mtx);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}
	if (i == MVGBE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&csc->sc_mtx);
		return -1;
	}

	smi =
	    MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) | MVGBE_SMI_OPCODE_READ;
	MVGBE_WRITE(csc, MVGBE_SMI, smi);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		smi = MVGBE_READ(csc, MVGBE_SMI);
		if (smi & MVGBE_SMI_READVALID)
			break;
	}

	mutex_exit(&csc->sc_mtx);

	DPRINTFN(9, ("mvgbec_miibus_readreg: i=%d, timeout=%d\n",
	    i, MVGBE_PHY_TIMEOUT));

	val = smi & MVGBE_SMI_DATA_MASK;

	DPRINTFN(9, ("mvgbec_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return val;
}

static void
mvgbec_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct mvgbe_softc *sc = device_private(dev);
	struct mvgbec_softc *csc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi;
	int i;

	if (mvgbec0 == NULL) {
		aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n");
		return;
	}
	csc = device_private(mvgbec0);

	DPRINTFN(9, ("mvgbec_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	mutex_enter(&csc->sc_mtx);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}
	if (i == MVGBE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&csc->sc_mtx);
		return;
	}

	smi = MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) |
	    MVGBE_SMI_OPCODE_WRITE | (val & MVGBE_SMI_DATA_MASK);
	MVGBE_WRITE(csc, MVGBE_SMI, smi);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}

	mutex_exit(&csc->sc_mtx);

	if (i == MVGBE_PHY_TIMEOUT)
		aprint_error_ifnet(ifp, "phy write timed out\n");
}

static void
mvgbec_miibus_statchg(struct ifnet *ifp)
{

	/* nothing to do */
}


static void
mvgbec_wininit(struct mvgbec_softc *sc)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t en, ac, size;
	int window, target, attr, rv, i;
	static int tags[] = {
		MARVELL_TAG_SDRAM_CS0,
		MARVELL_TAG_SDRAM_CS1,
		MARVELL_TAG_SDRAM_CS2,
		MARVELL_TAG_SDRAM_CS3,

		MARVELL_TAG_UNDEFINED,
	};

	/* First disable all address decode windows */
	en = MVGBE_BARE_EN_MASK;
	MVGBE_WRITE(sc, MVGBE_BARE, en);

	ac = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVGBE_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= MVGBE_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			MVGBE_WRITE(sc, MVGBE_HA(window),
			    (base >> 32) & 0xffffffff);
		}

		MVGBE_WRITE(sc, MVGBE_BASEADDR(window),
		    MVGBE_BASEADDR_TARGET(target) |
		    MVGBE_BASEADDR_ATTR(attr) |
		    MVGBE_BASEADDR_BASE(base));
		MVGBE_WRITE(sc, MVGBE_S(window), MVGBE_S_SIZE(size));

		en &= ~(1 << window);
		/* set full access (r/w) */
		ac |= MVGBE_EPAP_EPAR(window, MVGBE_EPAP_AC_FA);
		window++;
	}
	/* allow access to the decode windows */
	MVGBE_WRITE(sc, MVGBE_EPAP, ac);

	MVGBE_WRITE(sc, MVGBE_BARE, en);
}


/* ARGSUSED */
static int
mvgbe_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	uint32_t pbase, maddrh, maddrl;

	pbase = MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE;
	maddrh =
	    bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAH);
	maddrl =
	    bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAL);
	if ((maddrh | maddrl) == 0)
		return 0;

	return 1;
}

/* ARGSUSED */
static void
mvgbe_attach(device_t parent, device_t self, void *aux)
{
	struct mvgbe_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	struct mvgbe_txmap_entry *entry;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg, i;
	uint32_t maddrh, maddrl;
	void *kva;

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_dev = self;
	sc->sc_port = mva->mva_unit;
	sc->sc_iot = mva->mva_iot;
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE,
	    MVGBE_PORTR_SIZE, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTDAFR_BASE + mva->mva_unit * MVGBE_PORTDAFR_SIZE,
	    MVGBE_PORTDAFR_SIZE, &sc->sc_dafh)) {
		aprint_error_dev(self,
		    "Cannot map destination address filter registers\n");
		return;
	}
	sc->sc_dmat = mva->mva_dmat;

	maddrh = MVGBE_READ(sc, MVGBE_MACAH);
	maddrl = MVGBE_READ(sc, MVGBE_MACAL);
	sc->sc_enaddr[0] = maddrh >> 24;
	sc->sc_enaddr[1] = maddrh >> 16;
	sc->sc_enaddr[2] = maddrh >> 8;
	sc->sc_enaddr[3] = maddrh >> 0;
	sc->sc_enaddr[4] = maddrl >> 8;
	sc->sc_enaddr[5] = maddrl >> 0;
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* clear all ethernet port interrupts */
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);

	marvell_intr_establish(mva->mva_irq, IPL_NET, mvgbe_intr, sc);

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mvgbe_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't alloc rx buffers\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mvgbe_ring_data), &kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't map dma buffers (%lu bytes)\n",
		    (u_long)sizeof(struct mvgbe_ring_data));
		goto fail1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mvgbe_ring_data), 1,
	    sizeof(struct mvgbe_ring_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_ring_map)) {
		aprint_error_dev(self, "can't create dma map\n");
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map, kva,
	    sizeof(struct mvgbe_ring_data), NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't load dma map\n");
		goto fail3;
	}
	for (i = 0; i < MVGBE_RX_RING_CNT; i++)
		sc->sc_cdata.mvgbe_rx_chain[i].mvgbe_mbuf = NULL;

	SIMPLEQ_INIT(&sc->sc_txmap_head);
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		sc->sc_cdata.mvgbe_tx_chain[i].mvgbe_mbuf = NULL;

		if (bus_dmamap_create(sc->sc_dmat,
		    MVGBE_JLEN, MVGBE_NTXSEG, MVGBE_JLEN, 0,
		    BUS_DMA_NOWAIT, &dmamap)) {
			aprint_error_dev(self, "Can't create TX dmamap\n");
			goto fail4;
		}

		entry = kmem_alloc(sizeof(*entry), KM_SLEEP);
		if (!entry) {
			aprint_error_dev(self, "Can't alloc txmap entry\n");
			bus_dmamap_destroy(sc->sc_dmat, dmamap);
			goto fail4;
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc->sc_txmap_head, entry, link);
	}

	sc->sc_rdata = (struct mvgbe_ring_data *)kva;
	memset(sc->sc_rdata, 0, sizeof(struct mvgbe_ring_data));

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Try to allocate memory for jumbo buffers. */
	if (mvgbe_alloc_jumbo_mem(sc)) {
		aprint_error_dev(self, "jumbo buffer allocation failed\n");
		goto fail4;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = mvgbe_start;
	ifp->if_ioctl = mvgbe_ioctl;
	ifp->if_init = mvgbe_init;
	ifp->if_stop = mvgbe_stop;
	ifp->if_watchdog = mvgbe_watchdog;
	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_if.if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	/*
	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
	 */
	sc->sc_ethercom.ec_if.if_capabilities &= ~IFCAP_CSUM_TCPv4_Tx;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVGBE_TX_RING_CNT - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));

	mvgbe_stop(ifp, 0);

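	/*
	 * PHY access is done through the SMI registers of the first
	 * attached controller (mvgbec0); the MII callbacks set below
	 * therefore live in the controller code and serialize on its
	 * mutex.
	 */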
	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mvgbec_miibus_readreg;
	sc->sc_mii.mii_writereg = mvgbec_miibus_writereg;
	sc->sc_mii.mii_statchg = mvgbec_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    mvgbe_mediachange, mvgbe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    MII_PHY_ANY, parent == mvgbec0 ? 0 : 1, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);

	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, mvgbe_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);

	return;

fail4:
	while ((entry = SIMPLEQ_FIRST(&sc->sc_txmap_head)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmat, entry->dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_ring_map);
fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ring_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct mvgbe_ring_data));
fail1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return;
}


static int
mvgbe_intr(void *arg)
{
	struct mvgbe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t ic, ice, datum = 0;
	int claimed = 0;

	for (;;) {
		ice = MVGBE_READ(sc, MVGBE_ICE);
		ic = MVGBE_READ(sc, MVGBE_IC);

		DPRINTFN(3, ("mvgbe_intr: ic=%#x, ice=%#x\n", ic, ice));
		if (ic == 0 && ice == 0)
			break;

		datum = datum ^ ic ^ ice;

		MVGBE_WRITE(sc, MVGBE_IC, ~ic);
		MVGBE_WRITE(sc, MVGBE_ICE, ~ice);

		claimed = 1;

		if (ice & MVGBE_ICE_LINKCHG) {
			if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) {
				/* Enable port RX and TX. */
				MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
				MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
			} else {
				MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ(0));
				MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ);
			}
		}

		if (ic & (MVGBE_IC_RXBUF | MVGBE_IC_RXERROR))
			mvgbe_rxeof(sc);

		if (ice & (MVGBE_ICE_TXBUF | MVGBE_ICE_TXERR))
			mvgbe_txeof(sc);
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mvgbe_start(ifp);

	rnd_add_uint32(&sc->sc_rnd_source, datum);

	return claimed;
}

static void
mvgbe_start(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	uint32_t idx = sc->sc_cdata.mvgbe_tx_prod;
	int pkts = 0;

	DPRINTFN(3, ("mvgbe_start (idx %d, tx_chain[idx] %p)\n", idx,
	    sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf));

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;
	/* If Link is DOWN, can't start TX */
	if (!(MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP))
		return;

	while (sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (mvgbe_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}
	if (pkts == 0)
		return;

	/* Transmit at Queue 0 */
	if (idx != sc->sc_cdata.mvgbe_tx_prod) {
		sc->sc_cdata.mvgbe_tx_prod = idx;
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 1;
		sc->sc_wdogsoft = 1;
	}
}

static int
mvgbe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvgbe_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		DPRINTFN(2, ("mvgbe_ioctl ETHER\n"));
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				mvgbe_filter_setup(sc);
			}
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

int mvgbe_rximt = 0;
int mvgbe_tximt = 0;

static int
mvgbe_init(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mvgbec_softc *csc = device_private(device_parent(sc->sc_dev));
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;
	int i;

	DPRINTFN(2, ("mvgbe_init\n"));

	/* Cancel pending I/O and free all RX/TX buffers. */
	mvgbe_stop(ifp, 0);

	/* clear all ethernet port interrupts */
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);

	/* Init TX/RX descriptors */
	if (mvgbe_init_tx_ring(sc) == ENOBUFS) {
		aprint_error_ifnet(ifp,
		    "initialization failed: no memory for tx buffers\n");
		return ENOBUFS;
	}
	if (mvgbe_init_rx_ring(sc) == ENOBUFS) {
		aprint_error_ifnet(ifp,
		    "initialization failed: no memory for rx buffers\n");
		return ENOBUFS;
	}

	if (csc->sc_flags & FLAGS_FIX_MTU)
		MVGBE_WRITE(sc, MVGBE_MTU, 0);	/* hw reset value is wrong */
	MVGBE_WRITE(sc, MVGBE_PSC,
	    MVGBE_PSC_ANFC |			/* Enable Auto-Neg Flow Ctrl */
	    MVGBE_PSC_RESERVED |		/* Must be set to 1 */
	    MVGBE_PSC_FLFAIL |			/* Do NOT Force Link Fail */
	    MVGBE_PSC_MRU(MVGBE_PSC_MRU_9022) |	/* we want 9k */
	    MVGBE_PSC_SETFULLDX);		/* Set_FullDx */
	/* XXXX: mvgbe(4) always use RGMII. */
	MVGBE_WRITE(sc, MVGBE_PSC1,
	    MVGBE_READ(sc, MVGBE_PSC1) | MVGBE_PSC1_RGMIIEN);
	/* XXXX: Also always Weighted Round-Robin Priority Mode */
	MVGBE_WRITE(sc, MVGBE_TQFPC, MVGBE_TQFPC_EN(0));

	MVGBE_WRITE(sc, MVGBE_CRDP(0), MVGBE_RX_RING_ADDR(sc, 0));
	MVGBE_WRITE(sc, MVGBE_TCQDP, MVGBE_TX_RING_ADDR(sc, 0));

	if (csc->sc_flags & FLAGS_FIX_TQTB) {
		/*
		 * Queue 0 (offset 0x72700) must be programmed to 0x3fffffff.
		 * And offset 0x72704 must be programmed to 0x03ffffff.
		 * Queue 1 through 7 must be programmed to 0x0.
		 */
		MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(0), 0x3fffffff);
		MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(0), 0x03ffffff);
		for (i = 1; i < 8; i++) {
			MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x0);
			MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0x0);
		}
	} else
		for (i = 1; i < 8; i++) {
			MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x3fffffff);
			MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0xffff7fff);
			MVGBE_WRITE(sc, MVGBE_TQAC(i), 0xfc0000ff);
		}

	MVGBE_WRITE(sc, MVGBE_PXC, MVGBE_PXC_RXCS);
	MVGBE_WRITE(sc, MVGBE_PXCX, 0);
	MVGBE_WRITE(sc, MVGBE_SDC,
	    MVGBE_SDC_RXBSZ_16_64BITWORDS |
#if BYTE_ORDER == LITTLE_ENDIAN
	    MVGBE_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
	    MVGBE_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
#endif
	    MVGBE_SDC_IPGINTRX(mvgbe_rximt) |
	    MVGBE_SDC_TXBSZ_16_64BITWORDS);
	MVGBE_WRITE(sc, MVGBE_PTFUT, MVGBE_PTFUT_IPGINTTX(mvgbe_tximt));

	mvgbe_filter_setup(sc);

	mii_mediachg(mii);

	/* Enable port */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg | MVGBE_PSC_PORTEN);

	/* If Link is UP, Start RX and TX traffic */
	if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) {
		/* Enable port RX/TX. */
		MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
	}

	/* Enable interrupt masks */
	MVGBE_WRITE(sc, MVGBE_PIM,
	    MVGBE_IC_RXBUF |
	    MVGBE_IC_EXTEND |
	    MVGBE_IC_RXBUFQ_MASK |
	    MVGBE_IC_RXERROR |
	    MVGBE_IC_RXERRQ_MASK);
	MVGBE_WRITE(sc, MVGBE_PEIM,
	    MVGBE_ICE_TXBUF |
	    MVGBE_ICE_TXERR |
	    MVGBE_ICE_LINKCHG);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/* ARGSUSED */
static void
mvgbe_stop(struct ifnet *ifp, int disable)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	uint32_t reg;
	int i, cnt;

	DPRINTFN(2, ("mvgbe_stop\n"));

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVGBE_READ(sc, MVGBE_RQC);
	if (reg & MVGBE_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVGBE_READ(sc, MVGBE_TQC) & MVGBE_TQC_ENQ)
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ);

	/* Force link down */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_FLFAIL);

#define RX_DISABLE_TIMEOUT	0x1000000
#define TX_FIFO_EMPTY_TIMEOUT	0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			aprint_error_ifnet(ifp,
			    "timeout for RX stopped. rqc 0x%x\n", reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVGBE_READ(sc, MVGBE_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				aprint_error_ifnet(ifp,
				    "timeout for TX FIFO empty. status 0x%x\n",
				    reg);
				break;
			}
			cnt++;

			reg = MVGBE_READ(sc, MVGBE_PS);
		} while
		    (!(reg & MVGBE_PS_TXFIFOEMP) || reg & MVGBE_PS_TXINPROG);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVGBE_READ(sc, MVGBE_PS);
		if (reg & MVGBE_PS_TXFIFOEMP && !(reg & MVGBE_PS_TXINPROG))
			break;
		else
			aprint_error_ifnet(ifp,
			    "TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", cnt, reg);
	}

	/* Reset the Enable bit in the Port Serial Control Register */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_PORTEN);

	/* Disable interrupts */
	MVGBE_WRITE(sc, MVGBE_PIM, 0);
	MVGBE_WRITE(sc, MVGBE_PEIM, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		if (cdata->mvgbe_rx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_rx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_rx_chain[i].mvgbe_mbuf = NULL;
		}
	}
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		if (cdata->mvgbe_tx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_tx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_tx_chain[i].mvgbe_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

static void
mvgbe_watchdog(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	mvgbe_txeof(sc);
	if (sc->sc_cdata.mvgbe_tx_cnt != 0) {
		if (sc->sc_wdogsoft) {
			/*
			 * There is race condition between CPU and DMA
			 * engine. When DMA engine encounters queue end,
			 * it clears MVGBE_TQC_ENQ bit.
			 */
			MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
			ifp->if_timer = 5;
			sc->sc_wdogsoft = 0;
		} else {
			aprint_error_ifnet(ifp, "watchdog timeout\n");

			ifp->if_oerrors++;

			mvgbe_init(ifp);
		}
	}
}

static int
mvgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct mvgbe_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if (change != 0)
		sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;

	if ((change & IFF_PROMISC) != 0)
		mvgbe_filter_setup(sc);

	return 0;
}

/*
 * Set media options.
 */
static int
mvgbe_mediachange(struct ifnet *ifp)
{
	return ether_mediachange(ifp);
}

/*
 * Report current media status.
 */
static void
mvgbe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ether_mediastatus(ifp, ifmr);
}


static int
mvgbe_init_rx_ring(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cd = &sc->sc_cdata;
	struct mvgbe_ring_data *rd = sc->sc_rdata;
	int i;

	memset(rd->mvgbe_rx_ring, 0,
	    sizeof(struct mvgbe_rx_desc) * MVGBE_RX_RING_CNT);

	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		cd->mvgbe_rx_chain[i].mvgbe_desc =
		    &rd->mvgbe_rx_ring[i];
		if (i == MVGBE_RX_RING_CNT - 1) {
			cd->mvgbe_rx_chain[i].mvgbe_next =
			    &cd->mvgbe_rx_chain[0];
			rd->mvgbe_rx_ring[i].nextdescptr =
			    MVGBE_RX_RING_ADDR(sc, 0);
		} else {
			cd->mvgbe_rx_chain[i].mvgbe_next =
			    &cd->mvgbe_rx_chain[i + 1];
			rd->mvgbe_rx_ring[i].nextdescptr =
			    MVGBE_RX_RING_ADDR(sc, i + 1);
		}
	}

	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		if (mvgbe_newbuf(sc, i, NULL,
		    sc->sc_cdata.mvgbe_rx_jumbo_map) == ENOBUFS) {
			aprint_error_ifnet(&sc->sc_ethercom.ec_if,
			    "failed alloc of %dth mbuf\n", i);
			return ENOBUFS;
		}
	}
	sc->sc_cdata.mvgbe_rx_prod = 0;
	sc->sc_cdata.mvgbe_rx_cons = 0;

	return 0;
}

static int
mvgbe_init_tx_ring(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cd = &sc->sc_cdata;
	struct mvgbe_ring_data *rd = sc->sc_rdata;
	int i;

	memset(sc->sc_rdata->mvgbe_tx_ring, 0,
	    sizeof(struct mvgbe_tx_desc) * MVGBE_TX_RING_CNT);

	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		cd->mvgbe_tx_chain[i].mvgbe_desc =
		    &rd->mvgbe_tx_ring[i];
		if (i == MVGBE_TX_RING_CNT - 1) {
			cd->mvgbe_tx_chain[i].mvgbe_next =
			    &cd->mvgbe_tx_chain[0];
			rd->mvgbe_tx_ring[i].nextdescptr =
			    MVGBE_TX_RING_ADDR(sc, 0);
		} else {
			cd->mvgbe_tx_chain[i].mvgbe_next =
			    &cd->mvgbe_tx_chain[i + 1];
			rd->mvgbe_tx_ring[i].nextdescptr =
			    MVGBE_TX_RING_ADDR(sc, i + 1);
		}
		rd->mvgbe_tx_ring[i].cmdsts = MVGBE_BUFFER_OWNED_BY_HOST;
	}

	sc->sc_cdata.mvgbe_tx_prod = 0;
	sc->sc_cdata.mvgbe_tx_cons = 0;
	sc->sc_cdata.mvgbe_tx_cnt = 0;

	MVGBE_CDTXSYNC(sc, 0, MVGBE_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
mvgbe_newbuf(struct mvgbe_softc *sc, int i, struct mbuf *m,
	     bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct mvgbe_chain *c;
	struct mvgbe_rx_desc *r;
	int align;
	vaddr_t offset;

	if (m == NULL) {
		void *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			aprint_error_ifnet(&sc->sc_ethercom.ec_if,
			    "no memory for rx list -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Allocate the jumbo buffer */
		buf = mvgbe_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc->sc_ethercom.ec_if.if_xname));
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
		MEXTADD(m_new, buf, MVGBE_JLEN, 0, mvgbe_jfree, sc);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	align = (u_long)m_new->m_data & MVGBE_RXBUF_MASK;
	if (align != 0) {
		DPRINTFN(1,("align = %d\n", align));
		m_adj(m_new, MVGBE_RXBUF_ALIGN - align);
	}

	c = &sc->sc_cdata.mvgbe_rx_chain[i];
	r = c->mvgbe_desc;
	c->mvgbe_mbuf = m_new;
	offset = (vaddr_t)m_new->m_data - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf;
	r->bufptr = dmamap->dm_segs[0].ds_addr + offset;
	r->bufsize = MVGBE_JLEN & ~MVGBE_RXBUF_MASK;
	r->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_ENABLE_INTERRUPT;

	/* Invalidate RX buffer */
	bus_dmamap_sync(sc->sc_dmat, dmamap, offset, r->bufsize,
	    BUS_DMASYNC_PREREAD);

	MVGBE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * Memory management for jumbo frames.
 */

static int
mvgbe_alloc_jumbo_mem(struct mvgbe_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct mvgbe_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmat, MVGBE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, MVGBE_JMEM,
	    (void **)&kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%d bytes)\n", MVGBE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmat, MVGBE_JMEM, 1, MVGBE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->sc_cdata.mvgbe_rx_jumbo_map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_cdata.mvgbe_rx_jumbo_map,
	    kva, MVGBE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->sc_cdata.mvgbe_jumbo_buf = (void *)kva;
	DPRINTFN(1,("mvgbe_jumbo_buf = %p\n", sc->sc_cdata.mvgbe_jumbo_buf));

	LIST_INIT(&sc->sc_jfree_listhead);
	LIST_INIT(&sc->sc_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->sc_cdata.mvgbe_jumbo_buf;
	for (i = 0; i < MVGBE_JSLOTS; i++) {
		sc->sc_cdata.mvgbe_jslots[i] = ptr;
		ptr += MVGBE_JLEN;
		entry = kmem_alloc(sizeof(struct mvgbe_jpool_entry), KM_SLEEP);
		if (entry == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		if (i)
			LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry,
			    jpool_entries);
		else
			LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry,
			    jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_cdata.mvgbe_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_cdata.mvgbe_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->sc_dmat, kva, MVGBE_JMEM);
		case 1:
			bus_dmamem_free(sc->sc_dmat, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
mvgbe_jalloc(struct mvgbe_softc *sc)
{
	struct mvgbe_jpool_entry *entry;

	entry = LIST_FIRST(&sc->sc_jfree_listhead);

	if (entry == NULL)
		return NULL;

	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry, jpool_entries);
	return sc->sc_cdata.mvgbe_jslots[entry->slot];
}

/*
 * Release a jumbo buffer.
 */
static void
mvgbe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct mvgbe_jpool_entry *entry;
	struct mvgbe_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct mvgbe_softc *)arg;

	if (sc == NULL)
		panic("%s: can't find softc pointer!", __func__);

	/* calculate the slot this buffer belongs to */

	i = ((vaddr_t)buf - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf) / MVGBE_JLEN;

	if ((i < 0) || (i >= MVGBE_JSLOTS))
		panic("%s: asked to free buffer that we don't manage!",
		    __func__);

	s = splvm();
	entry = LIST_FIRST(&sc->sc_jinuse_listhead);
	if (entry == NULL)
		panic("%s: buffer not in use!", __func__);
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}

static int
mvgbe_encap(struct mvgbe_softc *sc, struct mbuf *m_head,
	    uint32_t *txidx)
{
	struct mvgbe_tx_desc *f = NULL;
	struct mvgbe_txmap_entry *entry;
	bus_dma_segment_t *txseg;
	bus_dmamap_t txmap;
	uint32_t first, current, last, cmdsts = 0;
	int m_csumflags, i;
	bool needs_defrag = false;

	DPRINTFN(3, ("mvgbe_encap\n"));

	entry = SIMPLEQ_FIRST(&sc->sc_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("mvgbe_encap: no txmap available\n"));
		return ENOBUFS;
	}
	txmap = entry->dmamap;

	first = current = last = *txidx;

	/*
	 * Preserve m_pkthdr.csum_flags here since m_head might be
	 * updated by m_defrag()
	 */
	m_csumflags = m_head->m_pkthdr.csum_flags;

do_defrag:
	if (__predict_false(needs_defrag == true)) {
		/* A small unaligned segment was detected. */
		struct mbuf *m_new;
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return EFBIG;
		m_head = m_new;
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m_head, BUS_DMA_NOWAIT)) {
		DPRINTFN(1, ("mvgbe_encap: dmamap failed\n"));
		return ENOBUFS;
	}

	txseg = txmap->dm_segs;

	if (__predict_true(needs_defrag == false)) {
		/*
		 * Detect rarely encountered DMA limitation.
		 */
		for (i = 0; i < txmap->dm_nsegs; i++) {
			if (((txseg[i].ds_addr & 7) != 0) &&
			    (txseg[i].ds_len <= 8) &&
			    (txseg[i].ds_len >= 1)) {
				txseg = NULL;
				bus_dmamap_unload(sc->sc_dmat, txmap);
				needs_defrag = true;
				goto do_defrag;
			}
		}
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (sc->sc_cdata.mvgbe_tx_cnt + txmap->dm_nsegs >=
	    MVGBE_TX_RING_CNT) {
		DPRINTFN(2, ("mvgbe_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmat, txmap);
		return ENOBUFS;
	}


	DPRINTFN(2, ("mvgbe_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc->sc_rdata->mvgbe_tx_ring[current];
		f->bufptr = txseg[i].ds_addr;
		f->bytecnt = txseg[i].ds_len;
		if (i != 0)
			f->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA;
		last = current;
		current = MVGBE_TX_RING_NEXT(current);
	}

	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVGBE_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVGBE_TX_IP_NO_FRAG |
		    MVGBE_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
	if (txmap->dm_nsegs == 1)
		f->cmdsts = cmdsts |
		    MVGBE_TX_GENERATE_CRC |
		    MVGBE_TX_ENABLE_INTERRUPT |
		    MVGBE_TX_ZERO_PADDING |
		    MVGBE_TX_FIRST_DESC |
		    MVGBE_TX_LAST_DESC;
	else {
		f = &sc->sc_rdata->mvgbe_tx_ring[first];
		f->cmdsts = cmdsts |
		    MVGBE_TX_GENERATE_CRC |
		    MVGBE_TX_FIRST_DESC;

		f = &sc->sc_rdata->mvgbe_tx_ring[last];
		f->cmdsts =
		    MVGBE_BUFFER_OWNED_BY_DMA |
		    MVGBE_TX_ENABLE_INTERRUPT |
		    MVGBE_TX_ZERO_PADDING |
		    MVGBE_TX_LAST_DESC;

		/* Sync descriptors except first */
		MVGBE_CDTXSYNC(sc,
		    (MVGBE_TX_RING_CNT - 1 == *txidx) ? 0 : (*txidx) + 1,
		    txmap->dm_nsegs - 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	sc->sc_cdata.mvgbe_tx_chain[last].mvgbe_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
	sc->sc_cdata.mvgbe_tx_map[last] = entry;

	/* Finally, sync first descriptor */
	sc->sc_rdata->mvgbe_tx_ring[first].cmdsts |=
	    MVGBE_BUFFER_OWNED_BY_DMA;
	MVGBE_CDTXSYNC(sc, *txidx, 1,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_cdata.mvgbe_tx_cnt += i;
	*txidx = current;

	DPRINTFN(3, ("mvgbe_encap: completed successfully\n"));

	return 0;
}

static void
mvgbe_rxeof(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	struct mvgbe_rx_desc *cur_rx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	bus_dmamap_t dmamap;
	uint32_t rxstat;
	uint16_t bufsize;
	int idx, cur, total_len;

	idx = sc->sc_cdata.mvgbe_rx_prod;

	DPRINTFN(3, ("mvgbe_rxeof %d\n", idx));

	for (;;) {
		cur = idx;

		/* Sync the descriptor */
		MVGBE_CDRXSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->sc_rdata->mvgbe_rx_ring[idx];

		if ((cur_rx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
		    MVGBE_BUFFER_OWNED_BY_DMA) {
			/* Invalidate the descriptor -- it's not ready yet */
			MVGBE_CDRXSYNC(sc, idx, BUS_DMASYNC_PREREAD);
			sc->sc_cdata.mvgbe_rx_prod = idx;
			break;
		}
#ifdef DIAGNOSTIC
		if ((cur_rx->cmdsts &
		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC)) !=
		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC))
			panic(
			    "mvgbe_rxeof: buffer size is smaller than packet");
#endif

		dmamap = sc->sc_cdata.mvgbe_rx_jumbo_map;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m = cdata->mvgbe_rx_chain[idx].mvgbe_mbuf;
		cdata->mvgbe_rx_chain[idx].mvgbe_mbuf = NULL;
		total_len = cur_rx->bytecnt;
		rxstat = cur_rx->cmdsts;
		bufsize = cur_rx->bufsize;

		cdata->mvgbe_rx_map[idx] = NULL;

		idx = MVGBE_RX_RING_NEXT(idx);

		if (rxstat & MVGBE_ERROR_SUMMARY) {
#if 0
			int err = rxstat & MVGBE_RX_ERROR_CODE_MASK;

			if (err == MVGBE_RX_CRC_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_OVERRUN_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_MAX_FRAME_LEN_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_RESOURCE_ERROR)
				ifp->if_ierrors++;
#else
			ifp->if_ierrors++;
#endif
			mvgbe_newbuf(sc, cur, m, dmamap);
			continue;
		}

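		/*
		 * The hardware checksum bits are only used for frames
		 * larger than MVGBE_RX_CSUM_MIN_BYTE; shorter frames
		 * skip the offload flags and are verified in software.
		 */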
		if (total_len <= MVGBE_RX_CSUM_MIN_BYTE)  /* XXX documented? */
			goto sw_csum;

		if (rxstat & MVGBE_RX_IP_FRAME_TYPE) {
			int flgs = 0;

			/* Check IPv4 header checksum */
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (!(rxstat & MVGBE_RX_IP_HEADER_OK))
				flgs |= M_CSUM_IPv4_BAD;

			/* Check TCPv4/UDPv4 checksum */
			if ((bufsize & MVGBE_RX_MAX_FRAME_LEN_ERROR) == 0) {
				/* Not fragmented */

				if ((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
				    MVGBE_RX_L4_TYPE_TCP)
					flgs |= M_CSUM_TCPv4;
				else if ((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
				    MVGBE_RX_L4_TYPE_UDP)
					flgs |= M_CSUM_UDPv4;

				if (((flgs & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0)
				    && !(rxstat & MVGBE_RX_L4_CHECKSUM))
					flgs |= M_CSUM_TCP_UDP_BAD;
			}

			m->m_pkthdr.csum_flags = flgs;
		}
sw_csum:

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (mvgbe_newbuf(sc, cur, NULL, dmamap) == ENOBUFS) {
			struct mbuf *m0;

			m0 = m_devget(mtod(m, char *), total_len, 0, ifp, NULL);
			mvgbe_newbuf(sc, cur, m, dmamap);
			if (m0 == NULL) {
				aprint_error_ifnet(ifp,
				    "no receive buffers available --"
				    " packet dropped!\n");
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		/* Skip the first 2 bytes (HW header) */
		m_adj(m, MVGBE_HWHEADER_SIZE);
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;

		bpf_mtap(ifp, m);

		/* pass it on. */
		(*ifp->if_input)(ifp, m);
	}
}

static void
mvgbe_txeof(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	struct mvgbe_tx_desc *cur_tx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mvgbe_txmap_entry *entry;
	int idx;

	DPRINTFN(3, ("mvgbe_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = cdata->mvgbe_tx_cons;
	while (idx != cdata->mvgbe_tx_prod) {
		MVGBE_CDTXSYNC(sc, idx, 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->sc_rdata->mvgbe_tx_ring[idx];
#ifdef MVGBE_DEBUG
		if (mvgbe_debug >= 3)
			mvgbe_dump_txdesc(cur_tx, idx);
#endif
		if ((cur_tx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
		    MVGBE_BUFFER_OWNED_BY_DMA) {
			MVGBE_CDTXSYNC(sc, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		if (cur_tx->cmdsts & MVGBE_TX_LAST_DESC)
			ifp->if_opackets++;
		if (cur_tx->cmdsts & MVGBE_ERROR_SUMMARY) {
			int err = cur_tx->cmdsts & MVGBE_TX_ERROR_CODE_MASK;

			if (err == MVGBE_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVGBE_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVGBE_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}
		if (cdata->mvgbe_tx_chain[idx].mvgbe_mbuf != NULL) {
			entry = cdata->mvgbe_tx_map[idx];

			m_freem(cdata->mvgbe_tx_chain[idx].mvgbe_mbuf);
			cdata->mvgbe_tx_chain[idx].mvgbe_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmat, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmat, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc->sc_txmap_head, entry, link);
			cdata->mvgbe_tx_map[idx] = NULL;
		}
		cdata->mvgbe_tx_cnt--;
		idx = MVGBE_TX_RING_NEXT(idx);
	}
	if (cdata->mvgbe_tx_cnt == 0)
		ifp->if_timer = 0;

	if (cdata->mvgbe_tx_cnt < MVGBE_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	cdata->mvgbe_tx_cons = idx;
}
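
/*
 * 8-bit CRC (polynomial 0x07) of the destination MAC address, used by
 * mvgbe_filter_setup() to index the "other multicast" filter table.
 */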
static uint8_t
mvgbe_crc8(const uint8_t *data, size_t size)
{
	int bit;
	uint8_t byte;
	uint8_t crc = 0;
	const uint8_t poly = 0x07;

	while(size--)
		for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
			crc = (crc << 1) ^
			    ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);

	return crc;
}

CTASSERT(MVGBE_NDFSMT == MVGBE_NDFOMT);

static void
mvgbe_filter_setup(struct mvgbe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVGBE_NDFUT], dfsmt[MVGBE_NDFSMT], dfomt[MVGBE_NDFOMT];
	uint32_t pxc;
	int i;
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
		goto allmulti;
	}

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* ranges are complex and somewhat rare */
			goto allmulti;
		}
		/* chip handles some IPv4 multicast specially */
		if (memcmp(enm->enm_addrlo, special, 5) == 0) {
			i = enm->enm_addrlo[5];
			dfsmt[i>>2] =
			    MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
		} else {
			i = mvgbe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
			dfomt[i>>2] =
			    MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
		}

		ETHER_NEXT_MULTI(step, enm);
	}
	goto set;

allmulti:
	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
		for (i = 0; i < MVGBE_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
			    MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
			    MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
			    MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
		}
	}

set:
	pxc = MVGBE_READ(sc, MVGBE_PXC);
	pxc &= ~MVGBE_PXC_UPM;
	pxc |= MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP;
	if (ifp->if_flags & IFF_BROADCAST) {
		pxc &= ~(MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP);
	}
	if (ifp->if_flags & IFF_PROMISC) {
		pxc |= MVGBE_PXC_UPM;
	}
	MVGBE_WRITE(sc, MVGBE_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
	MVGBE_WRITE_FILTER(sc, MVGBE_DFUT, dfut, MVGBE_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVGBE_WRITE_FILTER(sc, MVGBE_DFSMT, dfsmt, MVGBE_NDFSMT);
	MVGBE_WRITE_FILTER(sc, MVGBE_DFOMT, dfomt, MVGBE_NDFOMT);
}

#ifdef MVGBE_DEBUG
static void
mvgbe_dump_txdesc(struct mvgbe_tx_desc *desc, int idx)
{
#define DESC_PRINT(X)					\
	if (X)						\
		printf("txdesc[%d]." #X "=%#x\n", idx, X);

#if BYTE_ORDER == BIG_ENDIAN
	DESC_PRINT(desc->bytecnt);
	DESC_PRINT(desc->l4ichk);
	DESC_PRINT(desc->cmdsts);
	DESC_PRINT(desc->nextdescptr);
	DESC_PRINT(desc->bufptr);
#else	/* LITTLE_ENDIAN */
	DESC_PRINT(desc->cmdsts);
	DESC_PRINT(desc->l4ichk);
	DESC_PRINT(desc->bytecnt);
	DESC_PRINT(desc->bufptr);
	DESC_PRINT(desc->nextdescptr);
#endif
#undef DESC_PRINT
}
#endif