/*	$NetBSD: if_mvgbe.c,v 1.5 2011/02/01 23:40:12 jakllsch Exp $	*/
/*
 * Copyright (c) 2007, 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mvgbe.c,v 1.5 2011/02/01 23:40:12 jakllsch Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/sockio.h>

#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvgbereg.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <net/bpf.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "locators.h"

/* #define MVGBE_DEBUG 3 */
#ifdef MVGBE_DEBUG
#define DPRINTF(x)	if (mvgbe_debug) printf x
#define DPRINTFN(n,x)	if (mvgbe_debug >= (n)) printf x
int mvgbe_debug = MVGBE_DEBUG;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif


#define MVGBE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVGBE_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define MVGBE_READ_FILTER(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))
#define MVGBE_WRITE_FILTER(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))

#define MVGBE_TX_RING_CNT	256
#define MVGBE_TX_RING_MSK	(MVGBE_TX_RING_CNT - 1)
#define MVGBE_TX_RING_NEXT(x)	(((x) + 1) & MVGBE_TX_RING_MSK)
#define MVGBE_RX_RING_CNT	256
#define MVGBE_RX_RING_MSK	(MVGBE_RX_RING_CNT - 1)
#define MVGBE_RX_RING_NEXT(x)	(((x) + 1) & MVGBE_RX_RING_MSK)

CTASSERT(MVGBE_TX_RING_CNT > 1 && MVGBE_TX_RING_NEXT(MVGBE_TX_RING_CNT) ==
	(MVGBE_TX_RING_CNT + 1) % MVGBE_TX_RING_CNT);
CTASSERT(MVGBE_RX_RING_CNT > 1 && MVGBE_RX_RING_NEXT(MVGBE_RX_RING_CNT) ==
	(MVGBE_RX_RING_CNT + 1) % MVGBE_RX_RING_CNT);
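/*
 * Because the ring sizes are powers of two, MVGBE_TX_RING_NEXT() and
 * MVGBE_RX_RING_NEXT() can advance an index with a mask instead of a
 * modulo, e.g. MVGBE_TX_RING_NEXT(255) == 0 with MVGBE_TX_RING_CNT of
 * 256.  The CTASSERTs above reject a non-power-of-2 ring size, for
 * which the masked increment would no longer match the modulo result.
 */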
#define MVGBE_JSLOTS		384	/* XXXX */
#define MVGBE_JLEN		((MVGBE_MRU + MVGBE_RXBUF_ALIGN)&~MVGBE_RXBUF_MASK)
#define MVGBE_NTXSEG		30
#define MVGBE_JPAGESZ		PAGE_SIZE
#define MVGBE_RESID \
    (MVGBE_JPAGESZ - (MVGBE_JLEN * MVGBE_JSLOTS) % MVGBE_JPAGESZ)
#define MVGBE_JMEM \
    ((MVGBE_JLEN * MVGBE_JSLOTS) + MVGBE_RESID)

#define MVGBE_TX_RING_ADDR(sc, i) \
    ((sc)->sc_ring_map->dm_segs[0].ds_addr + \
    offsetof(struct mvgbe_ring_data, mvgbe_tx_ring[(i)]))

#define MVGBE_RX_RING_ADDR(sc, i) \
    ((sc)->sc_ring_map->dm_segs[0].ds_addr + \
    offsetof(struct mvgbe_ring_data, mvgbe_rx_ring[(i)]))

#define MVGBE_CDOFF(x)		offsetof(struct mvgbe_ring_data, x)
#define MVGBE_CDTXOFF(x)	MVGBE_CDOFF(mvgbe_tx_ring[(x)])
#define MVGBE_CDRXOFF(x)	MVGBE_CDOFF(mvgbe_rx_ring[(x)])

#define MVGBE_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
	const int __descsize = sizeof(struct mvgbe_tx_desc); \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > MVGBE_TX_RING_CNT) { \
		bus_dmamap_sync((sc)->sc_dmat, \
		    (sc)->sc_ring_map, MVGBE_CDTXOFF(__x), \
		    __descsize * (MVGBE_TX_RING_CNT - __x), (ops)); \
		__n -= (MVGBE_TX_RING_CNT - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \
	    MVGBE_CDTXOFF((__x)), __descsize * __n, (ops)); \
} while (0 /*CONSTCOND*/)

#define MVGBE_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \
	    MVGBE_CDRXOFF((x)), sizeof(struct mvgbe_rx_desc), (ops)); \
} while (/*CONSTCOND*/0)
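/*
 * MVGBE_CDTXSYNC copes with a descriptor range that wraps around the
 * end of the ring: e.g. syncing 4 descriptors starting at index 254
 * of the 256-entry ring is split into two bus_dmamap_sync() calls,
 * one for 254-255 and one for 0-1, since all descriptors live in one
 * contiguous DMA area (struct mvgbe_ring_data).
 */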
struct mvgbe_jpool_entry {
	int slot;
	LIST_ENTRY(mvgbe_jpool_entry) jpool_entries;
};

struct mvgbe_chain {
	void *mvgbe_desc;
	struct mbuf *mvgbe_mbuf;
	struct mvgbe_chain *mvgbe_next;
};

struct mvgbe_txmap_entry {
	bus_dmamap_t dmamap;
	SIMPLEQ_ENTRY(mvgbe_txmap_entry) link;
};

struct mvgbe_chain_data {
	struct mvgbe_chain mvgbe_tx_chain[MVGBE_TX_RING_CNT];
	struct mvgbe_txmap_entry *mvgbe_tx_map[MVGBE_TX_RING_CNT];
	int mvgbe_tx_prod;
	int mvgbe_tx_cons;
	int mvgbe_tx_cnt;

	struct mvgbe_chain mvgbe_rx_chain[MVGBE_RX_RING_CNT];
	bus_dmamap_t mvgbe_rx_map[MVGBE_RX_RING_CNT];
	bus_dmamap_t mvgbe_rx_jumbo_map;
	int mvgbe_rx_prod;
	int mvgbe_rx_cons;
	int mvgbe_rx_cnt;

	/* Stick the jumbo mem management stuff here too. */
	void *mvgbe_jslots[MVGBE_JSLOTS];
	void *mvgbe_jumbo_buf;
};

struct mvgbe_ring_data {
	struct mvgbe_tx_desc mvgbe_tx_ring[MVGBE_TX_RING_CNT];
	struct mvgbe_rx_desc mvgbe_rx_ring[MVGBE_RX_RING_CNT];
};

struct mvgbec_softc {
	device_t sc_dev;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	kmutex_t sc_mtx;

	int sc_fix_tqtb;
};

struct mvgbe_softc {
	device_t sc_dev;
	int sc_port;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_space_handle_t sc_dafh;	/* dest address filter handle */
	bus_dma_tag_t sc_dmat;

	struct ethercom sc_ethercom;
	struct mii_data sc_mii;
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];	/* station addr */

	struct mvgbe_chain_data sc_cdata;
	struct mvgbe_ring_data *sc_rdata;
	bus_dmamap_t sc_ring_map;
	int sc_if_flags;

	LIST_HEAD(__mvgbe_jfreehead, mvgbe_jpool_entry) sc_jfree_listhead;
	LIST_HEAD(__mvgbe_jinusehead, mvgbe_jpool_entry) sc_jinuse_listhead;
	SIMPLEQ_HEAD(__mvgbe_txmaphead, mvgbe_txmap_entry) sc_txmap_head;

#if NRND > 0
	rndsource_element_t sc_rnd_source;
#endif
};


/* Gigabit Ethernet Unit Global part functions */

static int mvgbec_match(device_t, struct cfdata *, void *);
static void mvgbec_attach(device_t, device_t, void *);

static int mvgbec_print(void *, const char *);
static int mvgbec_search(device_t, cfdata_t, const int *, void *);

/* MII functions */
static int mvgbec_miibus_readreg(device_t, int, int);
static void mvgbec_miibus_writereg(device_t, int, int, int);
static void mvgbec_miibus_statchg(device_t);

static void mvgbec_wininit(struct mvgbec_softc *);

/* Gigabit Ethernet Port part functions */

static int mvgbe_match(device_t, struct cfdata *, void *);
static void mvgbe_attach(device_t, device_t, void *);

static int mvgbe_intr(void *);

static void mvgbe_start(struct ifnet *);
static int mvgbe_ioctl(struct ifnet *, u_long, void *);
static int mvgbe_init(struct ifnet *);
static void mvgbe_stop(struct ifnet *, int);
static void mvgbe_watchdog(struct ifnet *);

static int mvgbe_ifflags_cb(struct ethercom *);

static int mvgbe_mediachange(struct ifnet *);
static void mvgbe_mediastatus(struct ifnet *, struct ifmediareq *);

static int mvgbe_init_rx_ring(struct mvgbe_softc *);
static int mvgbe_init_tx_ring(struct mvgbe_softc *);
static int mvgbe_newbuf(struct mvgbe_softc *, int, struct mbuf *, bus_dmamap_t);
static int mvgbe_alloc_jumbo_mem(struct mvgbe_softc *);
static void *mvgbe_jalloc(struct mvgbe_softc *);
static void mvgbe_jfree(struct mbuf *, void *, size_t, void *);
static int mvgbe_encap(struct mvgbe_softc *, struct mbuf *, uint32_t *);
static void mvgbe_rxeof(struct mvgbe_softc *);
static void mvgbe_txeof(struct mvgbe_softc *);
static uint8_t mvgbe_crc8(const uint8_t *, size_t);
static void mvgbe_filter_setup(struct mvgbe_softc *);
#ifdef MVGBE_DEBUG
static void mvgbe_dump_txdesc(struct mvgbe_tx_desc *, int);
#endif

CFATTACH_DECL_NEW(mvgbec_gt, sizeof(struct mvgbec_softc),
    mvgbec_match, mvgbec_attach, NULL, NULL);
CFATTACH_DECL_NEW(mvgbec_mbus, sizeof(struct mvgbec_softc),
    mvgbec_match, mvgbec_attach, NULL, NULL);

CFATTACH_DECL_NEW(mvgbe, sizeof(struct mvgbe_softc),
    mvgbe_match, mvgbe_attach, NULL, NULL);
struct mvgbe_port {
	int model;
	int unit;
	int ports;
	int irqs[3];
	int flags;
#define FLAGS_FIX_TQTB	(1 << 0)
} mvgbe_ports[] = {
	{ MARVELL_DISCOVERY_II,		0, 3, { 32, 33, 34 }, 0 },
	{ MARVELL_DISCOVERY_III,	0, 3, { 32, 33, 34 }, 0 },
#if 0
	{ MARVELL_DISCOVERY_LT,		0, ?, { }, 0 },
	{ MARVELL_DISCOVERY_V,		0, ?, { }, 0 },
	{ MARVELL_DISCOVERY_VI,		0, ?, { }, 0 },
#endif
	{ MARVELL_ORION_1_88F5082,	0, 1, { 21 }, 0 },
	{ MARVELL_ORION_1_88F5180N,	0, 1, { 21 }, 0 },
	{ MARVELL_ORION_1_88F5181,	0, 1, { 21 }, 0 },
	{ MARVELL_ORION_1_88F5182,	0, 1, { 21 }, 0 },
	{ MARVELL_ORION_2_88F5281,	0, 1, { 21 }, 0 },
	{ MARVELL_ORION_1_88F6082,	0, 1, { 21 }, 0 },
	{ MARVELL_ORION_1_88W8660,	0, 1, { 21 }, 0 },

	{ MARVELL_KIRKWOOD_88F6180,	0, 1, { 11 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6192,	0, 1, { 11 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6192,	1, 1, { 14 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6281,	0, 1, { 11 }, FLAGS_FIX_TQTB },
	{ MARVELL_KIRKWOOD_88F6281,	1, 1, { 14 }, FLAGS_FIX_TQTB },

	{ MARVELL_MV78XX0_MV78100,	0, 1, { 40 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78100,	1, 1, { 44 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78200,	0, 1, { 40 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78200,	1, 1, { 44 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78200,	2, 1, { 48 }, FLAGS_FIX_TQTB },
	{ MARVELL_MV78XX0_MV78200,	3, 1, { 52 }, FLAGS_FIX_TQTB },
};


/* ARGSUSED */
static int
mvgbec_match(device_t parent, struct cfdata *match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	int i;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	for (i = 0; i < __arraycount(mvgbe_ports); i++)
		if (mva->mva_model == mvgbe_ports[i].model) {
			mva->mva_size = MVGBE_SIZE;
			return 1;
		}
	return 0;
}

/* ARGSUSED */
static void
mvgbec_attach(device_t parent, device_t self, void *aux)
{
	struct mvgbec_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux, gbea;
	struct mvgbe_softc *port;
	struct mii_softc *mii;
	device_t child;
	uint32_t phyaddr;
	int i, j;

	aprint_naive("\n");
	aprint_normal(": Marvell Gigabit Ethernet Controller\n");

	sc->sc_dev = self;
	sc->sc_iot = mva->mva_iot;
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
	    mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}
	phyaddr = 0;
	MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr);

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);

	/* Disable and clear Gigabit Ethernet Unit interrupts */
	MVGBE_WRITE(sc, MVGBE_EUIM, 0);
	MVGBE_WRITE(sc, MVGBE_EUIC, 0);

	mvgbec_wininit(sc);

	memset(&gbea, 0, sizeof(gbea));
	for (i = 0; i < __arraycount(mvgbe_ports); i++) {
		if (mvgbe_ports[i].model != mva->mva_model ||
		    mvgbe_ports[i].unit != mva->mva_unit)
			continue;

		sc->sc_fix_tqtb = mvgbe_ports[i].flags & FLAGS_FIX_TQTB;

		for (j = 0; j < mvgbe_ports[i].ports; j++) {
			gbea.mva_name = "mvgbe";
			gbea.mva_model = mva->mva_model;
			gbea.mva_iot = sc->sc_iot;
			gbea.mva_ioh = sc->sc_ioh;
			gbea.mva_unit = j;
			gbea.mva_dmat = mva->mva_dmat;
			gbea.mva_irq = mvgbe_ports[i].irqs[j];
			child = config_found_sm_loc(sc->sc_dev, "mvgbec", NULL,
			    &gbea, mvgbec_print, mvgbec_search);
			if (child) {
				port = device_private(child);
				mii = LIST_FIRST(&port->sc_mii.mii_phys);
				phyaddr |=
				    MVGBE_PHYADDR_PHYAD(j, mii->mii_phy);
			}
		}
		break;
	}
	MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr);
}
static int
mvgbec_print(void *aux, const char *pnp)
{
	struct marvell_attach_args *gbea = aux;

	if (pnp)
		aprint_normal("%s at %s port %d",
		    gbea->mva_name, pnp, gbea->mva_unit);
	else {
		if (gbea->mva_unit != MVGBECCF_PORT_DEFAULT)
			aprint_normal(" port %d", gbea->mva_unit);
		if (gbea->mva_irq != MVGBECCF_IRQ_DEFAULT)
			aprint_normal(" irq %d", gbea->mva_irq);
	}
	return UNCONF;
}

/* ARGSUSED */
static int
mvgbec_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	struct marvell_attach_args *gbea = aux;

	if (cf->cf_loc[MVGBECCF_PORT] == gbea->mva_unit &&
	    cf->cf_loc[MVGBECCF_IRQ] != MVGBECCF_IRQ_DEFAULT)
		gbea->mva_irq = cf->cf_loc[MVGBECCF_IRQ];

	return config_match(parent, cf, aux);
}

static int
mvgbec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mvgbe_softc *sc = device_private(dev);
	struct mvgbec_softc *csc = device_private(device_parent(dev));
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi, val;
	int i;

	mutex_enter(&csc->sc_mtx);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}
	if (i == MVGBE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&csc->sc_mtx);
		return -1;
	}

	smi =
	    MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) | MVGBE_SMI_OPCODE_READ;
	MVGBE_WRITE(csc, MVGBE_SMI, smi);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		smi = MVGBE_READ(csc, MVGBE_SMI);
		if (smi & MVGBE_SMI_READVALID)
			break;
	}

	mutex_exit(&csc->sc_mtx);

	DPRINTFN(9, ("mvgbec_miibus_readreg: i=%d, timeout=%d\n",
	    i, MVGBE_PHY_TIMEOUT));

	val = smi & MVGBE_SMI_DATA_MASK;

	DPRINTFN(9, ("mvgbec_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return val;
}

static void
mvgbec_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct mvgbe_softc *sc = device_private(dev);
	struct mvgbec_softc *csc = device_private(device_parent(dev));
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi;
	int i;

	DPRINTFN(9, ("mvgbec_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	mutex_enter(&csc->sc_mtx);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}
	if (i == MVGBE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&csc->sc_mtx);
		return;
	}

	smi = MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) |
	    MVGBE_SMI_OPCODE_WRITE | (val & MVGBE_SMI_DATA_MASK);
	MVGBE_WRITE(csc, MVGBE_SMI, smi);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}

	mutex_exit(&csc->sc_mtx);

	if (i == MVGBE_PHY_TIMEOUT)
		aprint_error_ifnet(ifp, "phy write timed out\n");
}

static void
mvgbec_miibus_statchg(device_t dev)
{

	/* nothing to do */
}
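/*
 * The MII access functions above follow the SMI handshake: wait for
 * MVGBE_SMI_BUSY to clear, write a command with PHYAD/REGAD/OPCODE
 * encoded into MVGBE_SMI, then poll for MVGBE_SMI_READVALID (reads)
 * or for BUSY to clear again (writes).  csc->sc_mtx serializes the
 * ports, which share the controller's single SMI master.
 */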
static void
mvgbec_wininit(struct mvgbec_softc *sc)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t en, ac, size;
	int window, target, attr, rv, i;
	static int tags[] = {
		MARVELL_TAG_SDRAM_CS0,
		MARVELL_TAG_SDRAM_CS1,
		MARVELL_TAG_SDRAM_CS2,
		MARVELL_TAG_SDRAM_CS3,

		MARVELL_TAG_UNDEFINED,
	};

	/* First disable all address decode windows */
	en = MVGBE_BARE_EN_MASK;
	MVGBE_WRITE(sc, MVGBE_BARE, en);

	ac = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVGBE_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= MVGBE_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			MVGBE_WRITE(sc, MVGBE_HA(window),
			    (base >> 32) & 0xffffffff);
		}

		MVGBE_WRITE(sc, MVGBE_BASEADDR(window),
		    MVGBE_BASEADDR_TARGET(target) |
		    MVGBE_BASEADDR_ATTR(attr) |
		    MVGBE_BASEADDR_BASE(base));
		MVGBE_WRITE(sc, MVGBE_S(window), MVGBE_S_SIZE(size));

		en &= ~(1 << window);
		/* set full access (r/w) */
		ac |= MVGBE_EPAP_EPAR(window, MVGBE_EPAP_AC_FA);
		window++;
	}
	/* allow access to the decode windows */
	MVGBE_WRITE(sc, MVGBE_EPAP, ac);

	MVGBE_WRITE(sc, MVGBE_BARE, en);
}
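/*
 * Each window programmed above maps one SDRAM chip select into the
 * GbE unit's address space.  Bases above 4GB additionally need the
 * high address remap register (MVGBE_HA), which only the first
 * MVGBE_NREMAP windows have, hence the "can't remap window" check.
 */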
/* ARGSUSED */
static int
mvgbe_match(device_t parent, struct cfdata *match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	uint32_t pbase, maddrh, maddrl;

	pbase = MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE;
	maddrh =
	    bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAH);
	maddrl =
	    bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAL);
	if ((maddrh | maddrl) == 0)
		return 0;

	return 1;
}

/* ARGSUSED */
static void
mvgbe_attach(device_t parent, device_t self, void *aux)
{
	struct mvgbe_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	struct mvgbe_txmap_entry *entry;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg, i;
	uint32_t maddrh, maddrl;
	void *kva;

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_dev = self;
	sc->sc_port = mva->mva_unit;
	sc->sc_iot = mva->mva_iot;
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE,
	    MVGBE_PORTR_SIZE, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTDAFR_BASE + mva->mva_unit * MVGBE_PORTDAFR_SIZE,
	    MVGBE_PORTDAFR_SIZE, &sc->sc_dafh)) {
		aprint_error_dev(self,
		    "Cannot map destination address filter registers\n");
		return;
	}
	sc->sc_dmat = mva->mva_dmat;

	maddrh = MVGBE_READ(sc, MVGBE_MACAH);
	maddrl = MVGBE_READ(sc, MVGBE_MACAL);
	sc->sc_enaddr[0] = maddrh >> 24;
	sc->sc_enaddr[1] = maddrh >> 16;
	sc->sc_enaddr[2] = maddrh >> 8;
	sc->sc_enaddr[3] = maddrh >> 0;
	sc->sc_enaddr[4] = maddrl >> 8;
	sc->sc_enaddr[5] = maddrl >> 0;
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* clear all ethernet port interrupts */
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);

	marvell_intr_establish(mva->mva_irq, IPL_NET, mvgbe_intr, sc);

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mvgbe_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't alloc rx buffers\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mvgbe_ring_data), &kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't map dma buffers (%lu bytes)\n",
		    (u_long)sizeof(struct mvgbe_ring_data));
		goto fail1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mvgbe_ring_data), 1,
	    sizeof(struct mvgbe_ring_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_ring_map)) {
		aprint_error_dev(self, "can't create dma map\n");
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map, kva,
	    sizeof(struct mvgbe_ring_data), NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't load dma map\n");
		goto fail3;
	}
	for (i = 0; i < MVGBE_RX_RING_CNT; i++)
		sc->sc_cdata.mvgbe_rx_chain[i].mvgbe_mbuf = NULL;

	SIMPLEQ_INIT(&sc->sc_txmap_head);
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		sc->sc_cdata.mvgbe_tx_chain[i].mvgbe_mbuf = NULL;

		if (bus_dmamap_create(sc->sc_dmat,
		    MVGBE_JLEN, MVGBE_NTXSEG, MVGBE_JLEN, 0,
		    BUS_DMA_NOWAIT, &dmamap)) {
			aprint_error_dev(self, "Can't create TX dmamap\n");
			goto fail4;
		}

		entry = kmem_alloc(sizeof(*entry), KM_SLEEP);
		if (!entry) {
			aprint_error_dev(self, "Can't alloc txmap entry\n");
			bus_dmamap_destroy(sc->sc_dmat, dmamap);
			goto fail4;
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc->sc_txmap_head, entry, link);
	}

	sc->sc_rdata = (struct mvgbe_ring_data *)kva;
	memset(sc->sc_rdata, 0, sizeof(struct mvgbe_ring_data));

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Try to allocate memory for jumbo buffers. */
	if (mvgbe_alloc_jumbo_mem(sc)) {
		aprint_error_dev(self, "jumbo buffer allocation failed\n");
		goto fail4;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = mvgbe_start;
	ifp->if_ioctl = mvgbe_ioctl;
	ifp->if_init = mvgbe_init;
	ifp->if_stop = mvgbe_stop;
	ifp->if_watchdog = mvgbe_watchdog;
	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_if.if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	/*
	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
	 */
	sc->sc_ethercom.ec_if.if_capabilities &= ~IFCAP_CSUM_TCPv4_Tx;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVGBE_TX_RING_CNT - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));

	mvgbe_stop(ifp, 0);

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mvgbec_miibus_readreg;
	sc->sc_mii.mii_writereg = mvgbec_miibus_writereg;
	sc->sc_mii.mii_statchg = mvgbec_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    mvgbe_mediachange, mvgbe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);

	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, mvgbe_ifflags_cb);

#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

	return;

fail4:
	while ((entry = SIMPLEQ_FIRST(&sc->sc_txmap_head)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmat, entry->dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_ring_map);
fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ring_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct mvgbe_ring_data));
fail1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return;
}
static int
mvgbe_intr(void *arg)
{
	struct mvgbe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t ic, ice, datum = 0;
	int claimed = 0;

	for (;;) {
		ice = MVGBE_READ(sc, MVGBE_ICE);
		ic = MVGBE_READ(sc, MVGBE_IC);

		DPRINTFN(3, ("mvgbe_intr: ic=%#x, ice=%#x\n", ic, ice));
		if (ic == 0 && ice == 0)
			break;

		datum = datum ^ ic ^ ice;

		MVGBE_WRITE(sc, MVGBE_IC, ~ic);
		MVGBE_WRITE(sc, MVGBE_ICE, ~ice);

		claimed = 1;

		if (ice & MVGBE_ICE_LINKCHG) {
			if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) {
				/* Enable port RX and TX. */
				MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
				MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
			} else {
				MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ(0));
				MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ);
			}
		}

		if (ic & (MVGBE_IC_RXBUF | MVGBE_IC_RXERROR))
			mvgbe_rxeof(sc);

		if (ice & (MVGBE_ICE_TXBUF | MVGBE_ICE_TXERR))
			mvgbe_txeof(sc);
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mvgbe_start(ifp);

#if NRND > 0
	if (RND_ENABLED(&sc->sc_rnd_source))
		rnd_add_uint32(&sc->sc_rnd_source, datum);
#endif

	return claimed;
}

static void
mvgbe_start(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	uint32_t idx = sc->sc_cdata.mvgbe_tx_prod;
	int pkts = 0;

	DPRINTFN(3, ("mvgbe_start (idx %d, tx_chain[idx] %p)\n", idx,
	    sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf));

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;
	/* If Link is DOWN, can't start TX */
	if (!(MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP))
		return;

	while (sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (mvgbe_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m_head);
	}
	if (pkts == 0)
		return;

	/* Transmit at Queue 0 */
	if (idx != sc->sc_cdata.mvgbe_tx_prod) {
		sc->sc_cdata.mvgbe_tx_prod = idx;
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}
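/*
 * The cause registers appear to be write-zero-to-clear: writing ~ic
 * back to MVGBE_IC clears exactly the bits observed in ic, while the
 * 1 bits leave other causes untouched, so an event raised between
 * the read and the acknowledge is not lost.
 */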
static int
mvgbe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvgbe_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		DPRINTFN(2, ("mvgbe_ioctl ETHER\n"));
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				mvgbe_filter_setup(sc);
			}
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

int mvgbe_rximt = 0;
int mvgbe_tximt = 0;

static int
mvgbe_init(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mvgbec_softc *csc = device_private(device_parent(sc->sc_dev));
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;
	int i;

	DPRINTFN(2, ("mvgbe_init\n"));

	/* Cancel pending I/O and free all RX/TX buffers. */
	mvgbe_stop(ifp, 0);

	/* clear all ethernet port interrupts */
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);

	/* Init TX/RX descriptors */
	if (mvgbe_init_tx_ring(sc) == ENOBUFS) {
		aprint_error_ifnet(ifp,
		    "initialization failed: no memory for tx buffers\n");
		return ENOBUFS;
	}
	if (mvgbe_init_rx_ring(sc) == ENOBUFS) {
		aprint_error_ifnet(ifp,
		    "initialization failed: no memory for rx buffers\n");
		return ENOBUFS;
	}

	MVGBE_WRITE(sc, MVGBE_PSC,
	    MVGBE_PSC_ANFC |			/* Enable Auto-Neg Flow Ctrl */
	    MVGBE_PSC_RESERVED |		/* Must be set to 1 */
	    MVGBE_PSC_FLFAIL |			/* Do NOT Force Link Fail */
	    MVGBE_PSC_MRU(MVGBE_PSC_MRU_9022) |	/* we want 9k */
	    MVGBE_PSC_SETFULLDX);		/* Set_FullDx */
	/* XXXX: mvgbe(4) always uses RGMII. */
	MVGBE_WRITE(sc, MVGBE_PSC1,
	    MVGBE_READ(sc, MVGBE_PSC1) | MVGBE_PSC1_RGMIIEN);
	/* XXXX: Also always uses Weighted Round-Robin Priority Mode */
	MVGBE_WRITE(sc, MVGBE_TQFPC, MVGBE_TQFPC_EN(0));

	MVGBE_WRITE(sc, MVGBE_CRDP(0), MVGBE_RX_RING_ADDR(sc, 0));
	MVGBE_WRITE(sc, MVGBE_TCQDP, MVGBE_TX_RING_ADDR(sc, 0));

	if (csc->sc_fix_tqtb) {
		/*
		 * Queue 0 (offset 0x72700) must be programmed to 0x3fffffff.
		 * And offset 0x72704 must be programmed to 0x03ffffff.
		 * Queue 1 through 7 must be programmed to 0x0.
		 */
		MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(0), 0x3fffffff);
		MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(0), 0x03ffffff);
		for (i = 1; i < 8; i++) {
			MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x0);
			MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0x0);
		}
	} else
		for (i = 1; i < 8; i++) {
			MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x3fffffff);
			MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0xffff7fff);
			MVGBE_WRITE(sc, MVGBE_TQAC(i), 0xfc0000ff);
		}

	MVGBE_WRITE(sc, MVGBE_PXC, MVGBE_PXC_RXCS);
	MVGBE_WRITE(sc, MVGBE_PXCX, 0);
	MVGBE_WRITE(sc, MVGBE_SDC,
	    MVGBE_SDC_RXBSZ_16_64BITWORDS |
#if BYTE_ORDER == LITTLE_ENDIAN
	    MVGBE_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
	    MVGBE_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
#endif
	    MVGBE_SDC_IPGINTRX(mvgbe_rximt) |
	    MVGBE_SDC_TXBSZ_16_64BITWORDS);
	MVGBE_WRITE(sc, MVGBE_PTFUT, MVGBE_PTFUT_IPGINTTX(mvgbe_tximt));

	mvgbe_filter_setup(sc);

	mii_mediachg(mii);

	/* Enable port */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg | MVGBE_PSC_PORTEN);

	/* If Link is UP, Start RX and TX traffic */
	if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) {
		/* Enable port RX/TX. */
		MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
	}

	/* Enable interrupt masks */
	MVGBE_WRITE(sc, MVGBE_PIM,
	    MVGBE_IC_RXBUF |
	    MVGBE_IC_EXTEND |
	    MVGBE_IC_RXBUFQ_MASK |
	    MVGBE_IC_RXERROR |
	    MVGBE_IC_RXERRQ_MASK);
	MVGBE_WRITE(sc, MVGBE_PEIM,
	    MVGBE_ICE_TXBUF |
	    MVGBE_ICE_TXERR |
	    MVGBE_ICE_LINKCHG);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
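/*
 * mvgbe_rximt/mvgbe_tximt feed the IPGINT fields written to MVGBE_SDC
 * and MVGBE_PTFUT in mvgbe_init() above; a non-zero value presumably
 * delays the RX/TX interrupt by that many inter-packet gaps, a simple
 * form of interrupt coalescing.  Both default to 0, i.e. interrupt
 * immediately.
 */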
/* ARGSUSED */
static void
mvgbe_stop(struct ifnet *ifp, int disable)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	uint32_t reg;
	int i, cnt;

	DPRINTFN(2, ("mvgbe_stop\n"));

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVGBE_READ(sc, MVGBE_RQC);
	if (reg & MVGBE_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVGBE_READ(sc, MVGBE_TQC) & MVGBE_TQC_ENQ)
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ);

	/* Force link down */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_FLFAIL);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			aprint_error_ifnet(ifp,
			    "timeout for RX stopped. rqc 0x%x\n", reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVGBE_READ(sc, MVGBE_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				aprint_error_ifnet(ifp,
				    "timeout for TX FIFO empty. status "
				    "0x%x\n", reg);
				break;
			}
			cnt++;

			reg = MVGBE_READ(sc, MVGBE_PS);
		} while
		    (!(reg & MVGBE_PS_TXFIFOEMP) || reg & MVGBE_PS_TXINPROG);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVGBE_READ(sc, MVGBE_PS);
		if (reg & MVGBE_PS_TXFIFOEMP && !(reg & MVGBE_PS_TXINPROG))
			break;
		else
			aprint_error_ifnet(ifp,
			    "TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", cnt, reg);
	}

	/* Reset the Enable bit in the Port Serial Control Register */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_PORTEN);

	/* Disable interrupts */
	MVGBE_WRITE(sc, MVGBE_PIM, 0);
	MVGBE_WRITE(sc, MVGBE_PEIM, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		if (cdata->mvgbe_rx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_rx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_rx_chain[i].mvgbe_mbuf = NULL;
		}
	}
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		if (cdata->mvgbe_tx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_tx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_tx_chain[i].mvgbe_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
static void
mvgbe_watchdog(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	mvgbe_txeof(sc);
	if (sc->sc_cdata.mvgbe_tx_cnt != 0) {
		aprint_error_ifnet(ifp, "watchdog timeout\n");

		ifp->if_oerrors++;

		mvgbe_init(ifp);
	}
}

static int
mvgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct mvgbe_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if (change != 0)
		sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;

	if ((change & IFF_PROMISC) != 0)
		mvgbe_filter_setup(sc);

	return 0;
}

/*
 * Set media options.
 */
static int
mvgbe_mediachange(struct ifnet *ifp)
{
	return ether_mediachange(ifp);
}
/*
 * Report current media status.
 */
static void
mvgbe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ether_mediastatus(ifp, ifmr);
}


static int
mvgbe_init_rx_ring(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cd = &sc->sc_cdata;
	struct mvgbe_ring_data *rd = sc->sc_rdata;
	int i;

	memset(rd->mvgbe_rx_ring, 0,
	    sizeof(struct mvgbe_rx_desc) * MVGBE_RX_RING_CNT);

	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		cd->mvgbe_rx_chain[i].mvgbe_desc =
		    &rd->mvgbe_rx_ring[i];
		if (i == MVGBE_RX_RING_CNT - 1) {
			cd->mvgbe_rx_chain[i].mvgbe_next =
			    &cd->mvgbe_rx_chain[0];
			rd->mvgbe_rx_ring[i].nextdescptr =
			    MVGBE_RX_RING_ADDR(sc, 0);
		} else {
			cd->mvgbe_rx_chain[i].mvgbe_next =
			    &cd->mvgbe_rx_chain[i + 1];
			rd->mvgbe_rx_ring[i].nextdescptr =
			    MVGBE_RX_RING_ADDR(sc, i + 1);
		}
	}

	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		if (mvgbe_newbuf(sc, i, NULL,
		    sc->sc_cdata.mvgbe_rx_jumbo_map) == ENOBUFS) {
			aprint_error_ifnet(&sc->sc_ethercom.ec_if,
			    "failed alloc of %dth mbuf\n", i);
			return ENOBUFS;
		}
	}
	sc->sc_cdata.mvgbe_rx_prod = 0;
	sc->sc_cdata.mvgbe_rx_cons = 0;

	return 0;
}

static int
mvgbe_init_tx_ring(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cd = &sc->sc_cdata;
	struct mvgbe_ring_data *rd = sc->sc_rdata;
	int i;

	memset(sc->sc_rdata->mvgbe_tx_ring, 0,
	    sizeof(struct mvgbe_tx_desc) * MVGBE_TX_RING_CNT);

	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		cd->mvgbe_tx_chain[i].mvgbe_desc =
		    &rd->mvgbe_tx_ring[i];
		if (i == MVGBE_TX_RING_CNT - 1) {
			cd->mvgbe_tx_chain[i].mvgbe_next =
			    &cd->mvgbe_tx_chain[0];
			rd->mvgbe_tx_ring[i].nextdescptr =
			    MVGBE_TX_RING_ADDR(sc, 0);
		} else {
			cd->mvgbe_tx_chain[i].mvgbe_next =
			    &cd->mvgbe_tx_chain[i + 1];
			rd->mvgbe_tx_ring[i].nextdescptr =
			    MVGBE_TX_RING_ADDR(sc, i + 1);
		}
		rd->mvgbe_tx_ring[i].cmdsts = MVGBE_BUFFER_OWNED_BY_HOST;
	}

	sc->sc_cdata.mvgbe_tx_prod = 0;
	sc->sc_cdata.mvgbe_tx_cons = 0;
	sc->sc_cdata.mvgbe_tx_cnt = 0;

	MVGBE_CDTXSYNC(sc, 0, MVGBE_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
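/*
 * Both rings are built as circular singly-linked lists: descriptor
 * i's nextdescptr holds the bus address of descriptor i + 1, and the
 * last descriptor points back at descriptor 0, so the chip can walk
 * the ring without knowing MVGBE_TX_RING_CNT/MVGBE_RX_RING_CNT.
 */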
static int
mvgbe_newbuf(struct mvgbe_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct mvgbe_chain *c;
	struct mvgbe_rx_desc *r;
	int align;

	if (m == NULL) {
		void *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			aprint_error_ifnet(&sc->sc_ethercom.ec_if,
			    "no memory for rx list -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Allocate the jumbo buffer */
		buf = mvgbe_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc->sc_ethercom.ec_if.if_xname));
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
		MEXTADD(m_new, buf, MVGBE_JLEN, 0, mvgbe_jfree, sc);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	align = (u_long)m_new->m_data & MVGBE_RXBUF_MASK;
	if (align != 0) {
		DPRINTFN(1,("align = %d\n", align));
		m_adj(m_new, MVGBE_RXBUF_ALIGN - align);
	}

	c = &sc->sc_cdata.mvgbe_rx_chain[i];
	r = c->mvgbe_desc;
	c->mvgbe_mbuf = m_new;
	r->bufptr = dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf));
	r->bufsize = MVGBE_JLEN & ~MVGBE_RXBUF_MASK;
	r->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_ENABLE_INTERRUPT;

	MVGBE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * Memory management for jumbo frames.
 */

static int
mvgbe_alloc_jumbo_mem(struct mvgbe_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct mvgbe_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmat, MVGBE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, MVGBE_JMEM,
	    (void **)&kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%d bytes)\n", MVGBE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmat, MVGBE_JMEM, 1, MVGBE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->sc_cdata.mvgbe_rx_jumbo_map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_cdata.mvgbe_rx_jumbo_map,
	    kva, MVGBE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->sc_cdata.mvgbe_jumbo_buf = (void *)kva;
	DPRINTFN(1,("mvgbe_jumbo_buf = %p\n", sc->sc_cdata.mvgbe_jumbo_buf));

	LIST_INIT(&sc->sc_jfree_listhead);
	LIST_INIT(&sc->sc_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->sc_cdata.mvgbe_jumbo_buf;
	for (i = 0; i < MVGBE_JSLOTS; i++) {
		sc->sc_cdata.mvgbe_jslots[i] = ptr;
		ptr += MVGBE_JLEN;
		entry = kmem_alloc(sizeof(struct mvgbe_jpool_entry), KM_SLEEP);
		if (entry == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		if (i)
			LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry,
			    jpool_entries);
		else
			LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry,
			    jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_cdata.mvgbe_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_cdata.mvgbe_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->sc_dmat, kva, MVGBE_JMEM);
		case 1:
			bus_dmamem_free(sc->sc_dmat, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}
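/*
 * Pool layout: MVGBE_JMEM is MVGBE_JSLOTS buffers of MVGBE_JLEN bytes
 * each plus MVGBE_RESID of padding to fill out the last page, covered
 * by a single dmamap.  A buffer's slot number is thus recoverable
 * from its address alone, (buf - mvgbe_jumbo_buf) / MVGBE_JLEN, which
 * is what mvgbe_jfree() relies on below.
 */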
/*
 * Allocate a jumbo buffer.
 */
static void *
mvgbe_jalloc(struct mvgbe_softc *sc)
{
	struct mvgbe_jpool_entry *entry;

	entry = LIST_FIRST(&sc->sc_jfree_listhead);

	if (entry == NULL)
		return NULL;

	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry, jpool_entries);
	return sc->sc_cdata.mvgbe_jslots[entry->slot];
}

/*
 * Release a jumbo buffer.
 */
static void
mvgbe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct mvgbe_jpool_entry *entry;
	struct mvgbe_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct mvgbe_softc *)arg;

	if (sc == NULL)
		panic("%s: can't find softc pointer!", __func__);

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf) / MVGBE_JLEN;

	if ((i < 0) || (i >= MVGBE_JSLOTS))
		panic("%s: asked to free buffer that we don't manage!",
		    __func__);

	s = splvm();
	entry = LIST_FIRST(&sc->sc_jinuse_listhead);
	if (entry == NULL)
		panic("%s: buffer not in use!", __func__);
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}
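/*
 * mvgbe_jfree() is installed as the external-storage free callback by
 * the MEXTADD() call in mvgbe_newbuf(), so a jumbo slot returns to
 * sc_jfree_listhead automatically when the stack frees the mbuf that
 * borrowed it.
 */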
static int
mvgbe_encap(struct mvgbe_softc *sc, struct mbuf *m_head,
    uint32_t *txidx)
{
	struct mvgbe_tx_desc *f = NULL;
	struct mvgbe_txmap_entry *entry;
	bus_dma_segment_t *txseg;
	bus_dmamap_t txmap;
	uint32_t first, current, last, cmdsts = 0;
	int m_csumflags, i;

	DPRINTFN(3, ("mvgbe_encap\n"));

	entry = SIMPLEQ_FIRST(&sc->sc_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("mvgbe_encap: no txmap available\n"));
		return ENOBUFS;
	}
	txmap = entry->dmamap;

	first = current = last = *txidx;

	/*
	 * Preserve m_pkthdr.csum_flags here since m_head might be
	 * updated by m_defrag()
	 */
	m_csumflags = m_head->m_pkthdr.csum_flags;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m_head, BUS_DMA_NOWAIT)) {
		DPRINTFN(1, ("mvgbe_encap: dmamap failed\n"));
		return ENOBUFS;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (sc->sc_cdata.mvgbe_tx_cnt + txmap->dm_nsegs >=
	    MVGBE_TX_RING_CNT) {
		DPRINTFN(2, ("mvgbe_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmat, txmap);
		return ENOBUFS;
	}

	txseg = txmap->dm_segs;

	DPRINTFN(2, ("mvgbe_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc->sc_rdata->mvgbe_tx_ring[current];
		f->bufptr = txseg[i].ds_addr;
		f->bytecnt = txseg[i].ds_len;
		f->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA;
		last = current;
		current = MVGBE_TX_RING_NEXT(current);
	}

	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVGBE_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVGBE_TX_IP_NO_FRAG |
		    MVGBE_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
	if (txmap->dm_nsegs == 1)
		f->cmdsts = cmdsts |
		    MVGBE_BUFFER_OWNED_BY_DMA |
		    MVGBE_TX_GENERATE_CRC |
		    MVGBE_TX_ENABLE_INTERRUPT |
		    MVGBE_TX_ZERO_PADDING |
		    MVGBE_TX_FIRST_DESC |
		    MVGBE_TX_LAST_DESC;
	else {
		f = &sc->sc_rdata->mvgbe_tx_ring[first];
		f->cmdsts = cmdsts |
		    MVGBE_BUFFER_OWNED_BY_DMA |
		    MVGBE_TX_GENERATE_CRC |
		    MVGBE_TX_FIRST_DESC;

		f = &sc->sc_rdata->mvgbe_tx_ring[last];
		f->cmdsts =
		    MVGBE_BUFFER_OWNED_BY_DMA |
		    MVGBE_TX_ENABLE_INTERRUPT |
		    MVGBE_TX_ZERO_PADDING |
		    MVGBE_TX_LAST_DESC;
	}

	sc->sc_cdata.mvgbe_tx_chain[last].mvgbe_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
	sc->sc_cdata.mvgbe_tx_map[last] = entry;

	/* Sync descriptors before handing to chip */
	MVGBE_CDTXSYNC(sc, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_cdata.mvgbe_tx_cnt += i;
	*txidx = current;

	DPRINTFN(3, ("mvgbe_encap: completed successfully\n"));

	return 0;
}
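/*
 * Ownership layout produced by mvgbe_encap(): every segment is marked
 * MVGBE_BUFFER_OWNED_BY_DMA, but only the first descriptor carries
 * MVGBE_TX_FIRST_DESC plus the checksum-offload bits, and only the
 * last carries MVGBE_TX_LAST_DESC/MVGBE_TX_ENABLE_INTERRUPT; for a
 * single-segment packet all of these flags land on one descriptor.
 */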
static void
mvgbe_rxeof(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	struct mvgbe_rx_desc *cur_rx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	bus_dmamap_t dmamap;
	uint32_t rxstat;
	int idx, cur, total_len;

	idx = sc->sc_cdata.mvgbe_rx_prod;

	DPRINTFN(3, ("mvgbe_rxeof %d\n", idx));

	for (;;) {
		cur = idx;

		/* Sync the descriptor */
		MVGBE_CDRXSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->sc_rdata->mvgbe_rx_ring[idx];

		if ((cur_rx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
		    MVGBE_BUFFER_OWNED_BY_DMA) {
			/* Invalidate the descriptor -- it's not ready yet */
			MVGBE_CDRXSYNC(sc, idx, BUS_DMASYNC_PREREAD);
			sc->sc_cdata.mvgbe_rx_prod = idx;
			break;
		}
#ifdef DIAGNOSTIC
		if ((cur_rx->cmdsts &
		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC)) !=
		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC))
			panic(
			    "mvgbe_rxeof: buffer size is smaller than packet");
#endif

		dmamap = sc->sc_cdata.mvgbe_rx_jumbo_map;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m = cdata->mvgbe_rx_chain[idx].mvgbe_mbuf;
		cdata->mvgbe_rx_chain[idx].mvgbe_mbuf = NULL;
		total_len = cur_rx->bytecnt;
		rxstat = cur_rx->cmdsts;

		cdata->mvgbe_rx_map[idx] = NULL;

		idx = MVGBE_RX_RING_NEXT(idx);

		if (rxstat & MVGBE_ERROR_SUMMARY) {
#if 0
			int err = rxstat & MVGBE_RX_ERROR_CODE_MASK;

			if (err == MVGBE_RX_CRC_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_OVERRUN_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_MAX_FRAME_LEN_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_RESOURCE_ERROR)
				ifp->if_ierrors++;
#else
			ifp->if_ierrors++;
#endif
			mvgbe_newbuf(sc, cur, m, dmamap);
			continue;
		}

		if (total_len <= MVGBE_RX_CSUM_MIN_BYTE)  /* XXX documented? */
			goto sw_csum;

		if (rxstat & MVGBE_RX_IP_FRAME_TYPE) {
			/* Check IPv4 header checksum */
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (!(rxstat & MVGBE_RX_IP_HEADER_OK))
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4_BAD;
			/* Check TCPv4/UDPv4 checksum */
			if ((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
			    MVGBE_RX_L4_TYPE_TCP)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
			    MVGBE_RX_L4_TYPE_UDP)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			if (!(rxstat & MVGBE_RX_L4_CHECKSUM))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
sw_csum:

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (mvgbe_newbuf(sc, cur, NULL, dmamap) == ENOBUFS) {
			struct mbuf *m0;

			m0 = m_devget(mtod(m, char *), total_len, 0, ifp, NULL);
			mvgbe_newbuf(sc, cur, m, dmamap);
			if (m0 == NULL) {
				aprint_error_ifnet(ifp,
				    "no receive buffers available --"
				    " packet dropped!\n");
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		/* Skip the first 2 bytes (HW header) */
		m_adj(m, MVGBE_HWHEADER_SIZE);
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;

		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m);

		/* pass it on. */
		(*ifp->if_input)(ifp, m);
	}
}
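/*
 * The 2-byte header stripped above (MVGBE_HWHEADER_SIZE) is prepended
 * by the chip in front of each received frame; presumably it is there
 * so that the 14-byte Ethernet header ends on a 32-bit boundary,
 * leaving the IP header 4-byte aligned in the jumbo buffer.
 */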
static void
mvgbe_txeof(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	struct mvgbe_tx_desc *cur_tx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mvgbe_txmap_entry *entry;
	int idx;

	DPRINTFN(3, ("mvgbe_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = cdata->mvgbe_tx_cons;
	while (idx != cdata->mvgbe_tx_prod) {
		MVGBE_CDTXSYNC(sc, idx, 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->sc_rdata->mvgbe_tx_ring[idx];
#ifdef MVGBE_DEBUG
		if (mvgbe_debug >= 3)
			mvgbe_dump_txdesc(cur_tx, idx);
#endif
		if ((cur_tx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
		    MVGBE_BUFFER_OWNED_BY_DMA) {
			MVGBE_CDTXSYNC(sc, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		if (cur_tx->cmdsts & MVGBE_TX_LAST_DESC)
			ifp->if_opackets++;
		if (cur_tx->cmdsts & MVGBE_ERROR_SUMMARY) {
			int err = cur_tx->cmdsts & MVGBE_TX_ERROR_CODE_MASK;

			if (err == MVGBE_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVGBE_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVGBE_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}
		if (cdata->mvgbe_tx_chain[idx].mvgbe_mbuf != NULL) {
			entry = cdata->mvgbe_tx_map[idx];

			m_freem(cdata->mvgbe_tx_chain[idx].mvgbe_mbuf);
			cdata->mvgbe_tx_chain[idx].mvgbe_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmat, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmat, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc->sc_txmap_head, entry, link);
			cdata->mvgbe_tx_map[idx] = NULL;
		}
		cdata->mvgbe_tx_cnt--;
		idx = MVGBE_TX_RING_NEXT(idx);
	}
	if (cdata->mvgbe_tx_cnt == 0)
		ifp->if_timer = 0;

	if (cdata->mvgbe_tx_cnt < MVGBE_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	cdata->mvgbe_tx_cons = idx;
}

static uint8_t
mvgbe_crc8(const uint8_t *data, size_t size)
{
	int bit;
	uint8_t byte;
	uint8_t crc = 0;
	const uint8_t poly = 0x07;

	while (size--)
		for (byte = *data++, bit = NBBY - 1; bit >= 0; bit--)
			crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ?
			    poly : 0);

	return crc;
}

CTASSERT(MVGBE_NDFSMT == MVGBE_NDFOMT);
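/*
 * mvgbe_crc8() above is a plain CRC-8 with polynomial 0x07
 * (x^8 + x^2 + x + 1).  mvgbe_filter_setup() below hashes a multicast
 * address with it and uses the result to index the "other" multicast
 * table: word crc >> 2, byte lane crc & 3.
 */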
static void
mvgbe_filter_setup(struct mvgbe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t *dfut, *dfsmt, *dfomt;
	uint32_t pxc;
	int i;
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	dfut = kmem_zalloc(sizeof(*dfut) * MVGBE_NDFUT, KM_SLEEP);
	dfsmt = kmem_zalloc(sizeof(*dfsmt) * MVGBE_NDFSMT, KM_SLEEP);
	dfomt = kmem_zalloc(sizeof(*dfomt) * MVGBE_NDFOMT, KM_SLEEP);

	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
		goto allmulti;
	}

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* ranges are complex and somewhat rare */
			goto allmulti;
		}
		/* chip handles some IPv4 multicast specially */
		if (memcmp(enm->enm_addrlo, special, 5) == 0) {
			i = enm->enm_addrlo[5];
			dfsmt[i>>2] =
			    MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
		} else {
			i = mvgbe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
			dfomt[i>>2] =
			    MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
		}

		ETHER_NEXT_MULTI(step, enm);
	}
	goto set;

allmulti:
	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
		for (i = 0; i < MVGBE_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
			    MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
			    MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
			    MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
		}
	}

set:
	pxc = MVGBE_READ(sc, MVGBE_PXC);
	pxc &= ~MVGBE_PXC_UPM;
	pxc |= MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP;
	if (ifp->if_flags & IFF_BROADCAST) {
		pxc &= ~(MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP);
	}
	if (ifp->if_flags & IFF_PROMISC) {
		pxc |= MVGBE_PXC_UPM;
	}
	MVGBE_WRITE(sc, MVGBE_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
	MVGBE_WRITE_FILTER(sc, MVGBE_DFUT, dfut, MVGBE_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVGBE_WRITE_FILTER(sc, MVGBE_DFSMT, dfsmt, MVGBE_NDFSMT);
	MVGBE_WRITE_FILTER(sc, MVGBE_DFOMT, dfomt, MVGBE_NDFOMT);

	kmem_free(dfut, sizeof(dfut[0]) * MVGBE_NDFUT);
	kmem_free(dfsmt, sizeof(dfsmt[0]) * MVGBE_NDFSMT);
	kmem_free(dfomt, sizeof(dfomt[0]) * MVGBE_NDFOMT);
}

#ifdef MVGBE_DEBUG
static void
mvgbe_dump_txdesc(struct mvgbe_tx_desc *desc, int idx)
{
#define DESC_PRINT(X)					\
	if (X)						\
		printf("txdesc[%d]." #X "=%#x\n", idx, X);

#if BYTE_ORDER == BIG_ENDIAN
	DESC_PRINT(desc->bytecnt);
	DESC_PRINT(desc->l4ichk);
	DESC_PRINT(desc->cmdsts);
	DESC_PRINT(desc->nextdescptr);
	DESC_PRINT(desc->bufptr);
#else	/* LITTLE_ENDIAN */
	DESC_PRINT(desc->cmdsts);
	DESC_PRINT(desc->l4ichk);
	DESC_PRINT(desc->bytecnt);
	DESC_PRINT(desc->bufptr);
	DESC_PRINT(desc->nextdescptr);
#endif
#undef DESC_PRINT
}
#endif