1 /* $NetBSD: if_gfe.c,v 1.39 2010/11/13 13:52:04 uebayasi Exp $ */ 2 3 /* 4 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed for the NetBSD Project by 18 * Allegro Networks, Inc., and Wasabi Systems, Inc. 19 * 4. The name of Allegro Networks, Inc. may not be used to endorse 20 * or promote products derived from this software without specific prior 21 * written permission. 22 * 5. The name of Wasabi Systems, Inc. may not be used to endorse 23 * or promote products derived from this software without specific prior 24 * written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND 27 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, 28 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 29 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 30 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC. 
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * if_gfe.c -- GT ethernet MAC driver 42 */ 43 44 #include <sys/cdefs.h> 45 __KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.39 2010/11/13 13:52:04 uebayasi Exp $"); 46 47 #include "opt_inet.h" 48 #include "rnd.h" 49 50 #include <sys/param.h> 51 #include <sys/bus.h> 52 #include <sys/callout.h> 53 #include <sys/device.h> 54 #include <sys/errno.h> 55 #include <sys/ioctl.h> 56 #include <sys/mbuf.h> 57 #include <sys/mutex.h> 58 #include <sys/socket.h> 59 60 #include <uvm/uvm.h> 61 #include <net/if.h> 62 #include <net/if_dl.h> 63 #include <net/if_ether.h> 64 #include <net/if_media.h> 65 66 #ifdef INET 67 #include <netinet/in.h> 68 #include <netinet/if_inarp.h> 69 #endif 70 #include <net/bpf.h> 71 #if NRND > 0 72 #include <sys/rnd.h> 73 #endif 74 75 #include <dev/mii/mii.h> 76 #include <dev/mii/miivar.h> 77 78 #include <dev/marvell/gtreg.h> 79 #include <dev/marvell/gtvar.h> 80 #include <dev/marvell/gtethreg.h> 81 #include <dev/marvell/if_gfevar.h> 82 #include <dev/marvell/marvellreg.h> 83 #include <dev/marvell/marvellvar.h> 84 85 #include <prop/proplib.h> 86 87 #include "locators.h" 88 89 90 #define GE_READ(sc, reg) \ 91 bus_space_read_4((sc)->sc_memt, (sc)->sc_memh, (reg)) 92 #define GE_WRITE(sc, reg, v) \ 93 bus_space_write_4((sc)->sc_memt, (sc)->sc_memh, (reg), (v)) 94 95 #define GE_DEBUG 96 #if 0 97 #define GE_NOHASH 98 #define GE_NORX 99 #endif 100 101 #ifdef GE_DEBUG 102 #define GE_DPRINTF(sc, a) \ 103 do { \ 104 if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \ 105 printf 
a; \ 106 } while (0 /* CONSTCOND */) 107 #define GE_FUNC_ENTER(sc, func) GE_DPRINTF(sc, ("[" func)) 108 #define GE_FUNC_EXIT(sc, str) GE_DPRINTF(sc, (str "]")) 109 #else 110 #define GE_DPRINTF(sc, a) do { } while (0) 111 #define GE_FUNC_ENTER(sc, func) do { } while (0) 112 #define GE_FUNC_EXIT(sc, str) do { } while (0) 113 #endif 114 enum gfe_whack_op { 115 GE_WHACK_START, GE_WHACK_RESTART, 116 GE_WHACK_CHANGE, GE_WHACK_STOP 117 }; 118 119 enum gfe_hash_op { 120 GE_HASH_ADD, GE_HASH_REMOVE, 121 }; 122 123 #if 1 124 #define htogt32(a) htobe32(a) 125 #define gt32toh(a) be32toh(a) 126 #else 127 #define htogt32(a) htole32(a) 128 #define gt32toh(a) le32toh(a) 129 #endif 130 131 #define GE_RXDSYNC(sc, rxq, n, ops) \ 132 bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \ 133 (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \ 134 (ops)) 135 #define GE_RXDPRESYNC(sc, rxq, n) \ 136 GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) 137 #define GE_RXDPOSTSYNC(sc, rxq, n) \ 138 GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) 139 140 #define GE_TXDSYNC(sc, txq, n, ops) \ 141 bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \ 142 (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \ 143 (ops)) 144 #define GE_TXDPRESYNC(sc, txq, n) \ 145 GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) 146 #define GE_TXDPOSTSYNC(sc, txq, n) \ 147 GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) 148 149 #define STATIC 150 151 152 STATIC int gfec_match(device_t, cfdata_t, void *); 153 STATIC void gfec_attach(device_t, device_t, void *); 154 155 STATIC int gfec_print(void *, const char *); 156 STATIC int gfec_search(device_t, cfdata_t, const int *, void *); 157 158 STATIC int gfec_enet_phy(device_t, int); 159 STATIC int gfec_mii_read(device_t, int, int); 160 STATIC void gfec_mii_write(device_t, int, int, int); 161 STATIC void gfec_mii_statchg(device_t); 162 163 STATIC int 
gfe_match(device_t, cfdata_t, void *); 164 STATIC void gfe_attach(device_t, device_t, void *); 165 166 STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int, 167 size_t, int); 168 STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *); 169 170 STATIC int gfe_ifioctl(struct ifnet *, u_long, void *); 171 STATIC void gfe_ifstart(struct ifnet *); 172 STATIC void gfe_ifwatchdog(struct ifnet *); 173 174 STATIC void gfe_tick(void *arg); 175 176 STATIC void gfe_tx_restart(void *); 177 STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio); 178 STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t); 179 STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int); 180 STATIC int gfe_tx_txqalloc(struct gfe_softc *, enum gfe_txprio); 181 STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio); 182 STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op); 183 184 STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio); 185 STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio); 186 STATIC int gfe_rx_prime(struct gfe_softc *); 187 STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t); 188 STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio); 189 STATIC int gfe_rx_rxqinit(struct gfe_softc *, enum gfe_rxprio); 190 STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op); 191 192 STATIC int gfe_intr(void *); 193 194 STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op); 195 196 STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]); 197 STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op, 198 enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]); 199 STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *, 200 u_long); 201 STATIC int gfe_hash_fill(struct gfe_softc *); 202 STATIC int gfe_hash_alloc(struct gfe_softc *); 203 204 205 CFATTACH_DECL_NEW(gfec, sizeof(struct gfec_softc), 206 gfec_match, 
gfec_attach, NULL, NULL); 207 CFATTACH_DECL_NEW(gfe, sizeof(struct gfe_softc), 208 gfe_match, gfe_attach, NULL, NULL); 209 210 211 /* ARGSUSED */ 212 int 213 gfec_match(device_t parent, cfdata_t cf, void *aux) 214 { 215 struct marvell_attach_args *mva = aux; 216 217 if (strcmp(mva->mva_name, cf->cf_name) != 0) 218 return 0; 219 if (mva->mva_offset == MVA_OFFSET_DEFAULT) 220 return 0; 221 222 mva->mva_size = ETHC_SIZE; 223 return 1; 224 } 225 226 /* ARGSUSED */ 227 void 228 gfec_attach(device_t parent, device_t self, void *aux) 229 { 230 struct gfec_softc *sc = device_private(self); 231 struct marvell_attach_args *mva = aux, gfea; 232 static int gfe_irqs[] = { 32, 33, 34 }; 233 int i; 234 235 aprint_naive("\n"); 236 aprint_normal(": Ethernet Controller\n"); 237 238 sc->sc_dev = self; 239 sc->sc_iot = mva->mva_iot; 240 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset, 241 mva->mva_size, &sc->sc_ioh)) { 242 aprint_error_dev(self, "Cannot map registers\n"); 243 return; 244 } 245 246 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET); 247 248 for (i = 0; i < ETH_NUM; i++) { 249 gfea.mva_name = "gfe"; 250 gfea.mva_model = mva->mva_model; 251 gfea.mva_iot = sc->sc_iot; 252 gfea.mva_ioh = sc->sc_ioh; 253 gfea.mva_unit = i; 254 gfea.mva_dmat = mva->mva_dmat; 255 gfea.mva_irq = gfe_irqs[i]; 256 config_found_sm_loc(sc->sc_dev, "gfec", NULL, &gfea, 257 gfec_print, gfec_search); 258 } 259 } 260 261 int 262 gfec_print(void *aux, const char *pnp) 263 { 264 struct marvell_attach_args *gfea = aux; 265 266 if (pnp) 267 aprint_normal("%s at %s port %d", 268 gfea->mva_name, pnp, gfea->mva_unit); 269 else { 270 if (gfea->mva_unit != GFECCF_PORT_DEFAULT) 271 aprint_normal(" port %d", gfea->mva_unit); 272 if (gfea->mva_irq != GFECCF_IRQ_DEFAULT) 273 aprint_normal(" irq %d", gfea->mva_irq); 274 } 275 return UNCONF; 276 } 277 278 /* ARGSUSED */ 279 int 280 gfec_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux) 281 { 282 struct marvell_attach_args *gfea = 
aux; 283 284 if (cf->cf_loc[GFECCF_PORT] == gfea->mva_unit && 285 cf->cf_loc[GFECCF_IRQ] != GFECCF_IRQ_DEFAULT) 286 gfea->mva_irq = cf->cf_loc[GFECCF_IRQ]; 287 288 return config_match(parent, cf, aux); 289 } 290 291 int 292 gfec_enet_phy(device_t dev, int unit) 293 { 294 struct gfec_softc *sc = device_private(dev); 295 uint32_t epar; 296 297 epar = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ETH_EPAR); 298 return ETH_EPAR_PhyAD_GET(epar, unit); 299 } 300 301 int 302 gfec_mii_read(device_t dev, int phy, int reg) 303 { 304 struct gfec_softc *csc = device_private(device_parent(dev)); 305 uint32_t data; 306 int count = 10000; 307 308 mutex_enter(&csc->sc_mtx); 309 310 do { 311 DELAY(10); 312 data = bus_space_read_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR); 313 } while ((data & ETH_ESMIR_Busy) && count-- > 0); 314 315 if (count == 0) { 316 aprint_error_dev(dev, 317 "mii read for phy %d reg %d busied out\n", phy, reg); 318 mutex_exit(&csc->sc_mtx); 319 return ETH_ESMIR_Value_GET(data); 320 } 321 322 bus_space_write_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR, 323 ETH_ESMIR_READ(phy, reg)); 324 325 count = 10000; 326 do { 327 DELAY(10); 328 data = bus_space_read_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR); 329 } while ((data & ETH_ESMIR_ReadValid) == 0 && count-- > 0); 330 331 mutex_exit(&csc->sc_mtx); 332 333 if (count == 0) 334 aprint_error_dev(dev, 335 "mii read for phy %d reg %d timed out\n", phy, reg); 336 #if defined(GTMIIDEBUG) 337 aprint_normal_dev(dev, "mii_read(%d, %d): %#x data %#x\n", 338 phy, reg, data, ETH_ESMIR_Value_GET(data)); 339 #endif 340 return ETH_ESMIR_Value_GET(data); 341 } 342 343 void 344 gfec_mii_write (device_t dev, int phy, int reg, int value) 345 { 346 struct gfec_softc *csc = device_private(device_parent(dev)); 347 uint32_t data; 348 int count = 10000; 349 350 mutex_enter(&csc->sc_mtx); 351 352 do { 353 DELAY(10); 354 data = bus_space_read_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR); 355 } while ((data & ETH_ESMIR_Busy) && count-- > 0); 356 357 if (count == 0) { 
358 aprint_error_dev(dev, 359 "mii write for phy %d reg %d busied out (busy)\n", 360 phy, reg); 361 mutex_exit(&csc->sc_mtx); 362 return; 363 } 364 365 bus_space_write_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR, 366 ETH_ESMIR_WRITE(phy, reg, value)); 367 368 count = 10000; 369 do { 370 DELAY(10); 371 data = bus_space_read_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR); 372 } while ((data & ETH_ESMIR_Busy) && count-- > 0); 373 374 mutex_exit(&csc->sc_mtx); 375 376 if (count == 0) 377 aprint_error_dev(dev, 378 "mii write for phy %d reg %d timed out\n", phy, reg); 379 #if defined(GTMIIDEBUG) 380 aprint_normal_dev(dev, "mii_write(%d, %d, %#x)\n", phy, reg, value); 381 #endif 382 } 383 384 void 385 gfec_mii_statchg(device_t dev) 386 { 387 /* struct gfe_softc *sc = device_private(self); */ 388 /* do nothing? */ 389 } 390 391 /* ARGSUSED */ 392 int 393 gfe_match(device_t parent, cfdata_t cf, void *aux) 394 { 395 396 return 1; 397 } 398 399 /* ARGSUSED */ 400 void 401 gfe_attach(device_t parent, device_t self, void *aux) 402 { 403 struct marvell_attach_args *mva = aux; 404 struct gfe_softc * const sc = device_private(self); 405 struct ifnet * const ifp = &sc->sc_ec.ec_if; 406 uint32_t sdcr; 407 int phyaddr, error; 408 prop_data_t ea; 409 uint8_t enaddr[6]; 410 411 aprint_naive("\n"); 412 aprint_normal(": Ethernet Controller\n"); 413 414 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, 415 mva->mva_offset, mva->mva_size, &sc->sc_memh)) { 416 aprint_error_dev(self, "failed to map registers\n"); 417 return; 418 } 419 sc->sc_dev = self; 420 sc->sc_memt = mva->mva_iot; 421 sc->sc_dmat = mva->mva_dmat; 422 sc->sc_macno = (mva->mva_offset == ETH_BASE(0)) ? 0 : 423 ((mva->mva_offset == ETH_BASE(1)) ? 
	    1 : 2);

	callout_init(&sc->sc_co, 0);

	phyaddr = gfec_enet_phy(parent, sc->sc_macno);

	/*
	 * Fetch the MAC address from the device properties.
	 * NOTE(review): if the "mac-addr" property is absent, enaddr is
	 * used uninitialized below — confirm the platform always
	 * provides it.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	}

	/* Snapshot the port configuration registers and interrupt mask. */
	sc->sc_pcr = GE_READ(sc, ETH_EPCR);
	sc->sc_pcxr = GE_READ(sc, ETH_EPCXR);
	sc->sc_intrmask = GE_READ(sc, ETH_EIMR) | ETH_IR_MIIPhySTC;

	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));

#if defined(DEBUG)
	printf("pcr %#x, pcxr %#x\n", sc->sc_pcr, sc->sc_pcxr);
#endif

	/* cf_flags bit 0 selects RMII instead of MII. */
	sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override;
	if (device_cfdata(self)->cf_flags & 1) {
		aprint_normal_dev(self, "phy %d (rmii)\n", phyaddr);
		sc->sc_pcxr |= ETH_EPCXR_RMIIEn;
	} else {
		aprint_normal_dev(self, "phy %d (mii)\n", phyaddr);
		sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn;
	}
	/* cf_flags bit 1 keeps queue memory allocated across stop/start. */
	if (device_cfdata(self)->cf_flags & 2)
		sc->sc_flags |= GE_NOFREE;
	/* Set the maximum frame length to 1536 bytes. */
	sc->sc_pcxr &= ~ETH_EPCXR_MFL_SET(ETH_EPCXR_MFL_MASK);
	sc->sc_pcxr |= ETH_EPCXR_MFL_SET(ETH_EPCXR_MFL_1536);
	sc->sc_max_frame_length = 1536;

	if (sc->sc_pcr & ETH_EPCR_EN) {
		int tries = 1000;
		/*
		 * Abort transmitter and receiver and wait for them to quiesce.
		 */
		GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_AR | ETH_ESDCMR_AT);
		do {
			delay(100);
			if (tries-- <= 0) {
				aprint_error_dev(self, "Abort TX/RX failed\n");
				break;
			}
		} while (GE_READ(sc, ETH_ESDCMR) &
		    (ETH_ESDCMR_AR | ETH_ESDCMR_AT));
	}

	sc->sc_pcr &=
	    ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF);

#if defined(DEBUG)
	printf("pcr %#x, pcxr %#x\n", sc->sc_pcr, sc->sc_pcxr);
#endif

	/*
	 * Now turn off the GT.  If it didn't quiesce, too bad.
	 */
	GE_WRITE(sc, ETH_EPCR, sc->sc_pcr);
	GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
	/* Program the SDMA burst size and receive-interrupt-on-frame. */
	sdcr = GE_READ(sc, ETH_ESDCR);
	ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4);
	sdcr |= ETH_ESDCR_RIFB;
	GE_WRITE(sc, ETH_ESDCR, sdcr);

	/* Hook this port into the shared MII accessors on the parent. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = gfec_mii_read;
	sc->sc_mii.mii_writereg = gfec_mii_write;
	sc->sc_mii.mii_statchg = gfec_mii_statchg;

	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: offer only a "none" medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#if 0
	ifp->if_flags |= IFF_DEBUG;
#endif
	ifp->if_ioctl = gfe_ifioctl;
	ifp->if_start = gfe_ifstart;
	ifp->if_watchdog = gfe_ifwatchdog;

	/* With GE_NOFREE, allocate all queue memory once, up front. */
	if (sc->sc_flags & GE_NOFREE) {
		error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO);
		if (!error)
			error = gfe_tx_txqalloc(sc, GE_TXPRIO_HI);
		if (!error)
			error = gfe_hash_alloc(sc);
		if (error)
			aprint_error_dev(self,
			    "failed to allocate resources: %d\n", error);
	}

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header));
#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, device_xname(self), RND_TYPE_NET,
	    0);
#endif
	marvell_intr_establish(mva->mva_irq, IPL_NET, gfe_intr, sc);
547 } 548 549 int 550 gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs, 551 size_t size, int flags) 552 { 553 int error = 0; 554 GE_FUNC_ENTER(sc, "gfe_dmamem_alloc"); 555 556 KASSERT(gdm->gdm_kva == NULL); 557 gdm->gdm_size = size; 558 gdm->gdm_maxsegs = maxsegs; 559 560 error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE, 561 gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs, 562 BUS_DMA_NOWAIT); 563 if (error) 564 goto fail; 565 566 error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs, 567 gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT); 568 if (error) 569 goto fail; 570 571 error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs, 572 gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map); 573 if (error) 574 goto fail; 575 576 error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva, 577 gdm->gdm_size, NULL, BUS_DMA_NOWAIT); 578 if (error) 579 goto fail; 580 581 /* invalidate from cache */ 582 bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size, 583 BUS_DMASYNC_PREREAD); 584 fail: 585 if (error) { 586 gfe_dmamem_free(sc, gdm); 587 GE_DPRINTF(sc, (":err=%d", error)); 588 } 589 GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x", 590 gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs, 591 gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len)); 592 GE_FUNC_EXIT(sc, ""); 593 return error; 594 } 595 596 void 597 gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm) 598 { 599 GE_FUNC_ENTER(sc, "gfe_dmamem_free"); 600 if (gdm->gdm_map) 601 bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map); 602 if (gdm->gdm_kva) 603 bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size); 604 if (gdm->gdm_nsegs > 0) 605 bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs); 606 gdm->gdm_map = NULL; 607 gdm->gdm_kva = NULL; 608 gdm->gdm_nsegs = 0; 609 GE_FUNC_EXIT(sc, ""); 610 } 611 612 int 613 gfe_ifioctl(struct ifnet *ifp, u_long cmd, void 
*data) 614 { 615 struct gfe_softc * const sc = ifp->if_softc; 616 struct ifreq *ifr = (struct ifreq *) data; 617 struct ifaddr *ifa = (struct ifaddr *) data; 618 int s, error = 0; 619 620 GE_FUNC_ENTER(sc, "gfe_ifioctl"); 621 s = splnet(); 622 623 switch (cmd) { 624 case SIOCINITIFADDR: 625 ifp->if_flags |= IFF_UP; 626 error = gfe_whack(sc, GE_WHACK_START); 627 switch (ifa->ifa_addr->sa_family) { 628 #ifdef INET 629 case AF_INET: 630 if (error == 0) 631 arp_ifinit(ifp, ifa); 632 break; 633 #endif 634 default: 635 break; 636 } 637 break; 638 639 case SIOCSIFFLAGS: 640 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 641 break; 642 /* XXX re-use ether_ioctl() */ 643 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) { 644 case IFF_UP|IFF_RUNNING:/* active->active, update */ 645 error = gfe_whack(sc, GE_WHACK_CHANGE); 646 break; 647 case IFF_RUNNING: /* not up, so we stop */ 648 error = gfe_whack(sc, GE_WHACK_STOP); 649 break; 650 case IFF_UP: /* not running, so we start */ 651 error = gfe_whack(sc, GE_WHACK_START); 652 break; 653 case 0: /* idle->idle: do nothing */ 654 break; 655 } 656 break; 657 658 case SIOCSIFMEDIA: 659 case SIOCGIFMEDIA: 660 case SIOCADDMULTI: 661 case SIOCDELMULTI: 662 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { 663 if (ifp->if_flags & IFF_RUNNING) 664 error = gfe_whack(sc, GE_WHACK_CHANGE); 665 else 666 error = 0; 667 } 668 break; 669 670 case SIOCSIFMTU: 671 if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) { 672 error = EINVAL; 673 break; 674 } 675 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET) 676 error = 0; 677 break; 678 679 default: 680 error = ether_ioctl(ifp, cmd, data); 681 break; 682 } 683 splx(s); 684 GE_FUNC_EXIT(sc, ""); 685 return error; 686 } 687 688 void 689 gfe_ifstart(struct ifnet *ifp) 690 { 691 struct gfe_softc * const sc = ifp->if_softc; 692 struct mbuf *m; 693 694 GE_FUNC_ENTER(sc, "gfe_ifstart"); 695 696 if ((ifp->if_flags & IFF_RUNNING) == 0) { 697 GE_FUNC_EXIT(sc, "$"); 698 return; 699 } 
700 701 for (;;) { 702 IF_DEQUEUE(&ifp->if_snd, m); 703 if (m == NULL) { 704 ifp->if_flags &= ~IFF_OACTIVE; 705 GE_FUNC_EXIT(sc, ""); 706 return; 707 } 708 709 /* 710 * No space in the pending queue? try later. 711 */ 712 if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq)) 713 break; 714 715 /* 716 * Try to enqueue a mbuf to the device. If that fails, we 717 * can always try to map the next mbuf. 718 */ 719 IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m); 720 GE_DPRINTF(sc, (">")); 721 #ifndef GE_NOTX 722 (void) gfe_tx_enqueue(sc, GE_TXPRIO_HI); 723 #endif 724 } 725 726 /* 727 * Attempt to queue the mbuf for send failed. 728 */ 729 IF_PREPEND(&ifp->if_snd, m); 730 ifp->if_flags |= IFF_OACTIVE; 731 GE_FUNC_EXIT(sc, "%%"); 732 } 733 734 void 735 gfe_ifwatchdog(struct ifnet *ifp) 736 { 737 struct gfe_softc * const sc = ifp->if_softc; 738 struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI]; 739 740 GE_FUNC_ENTER(sc, "gfe_ifwatchdog"); 741 aprint_error_dev(sc->sc_dev, "device timeout"); 742 if (ifp->if_flags & IFF_RUNNING) { 743 uint32_t curtxdnum; 744 745 curtxdnum = (GE_READ(sc, txq->txq_ectdp) - 746 txq->txq_desc_busaddr) / sizeof(txq->txq_descs[0]); 747 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 748 GE_TXDPOSTSYNC(sc, txq, curtxdnum); 749 aprint_error(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ", 750 txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts, 751 txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts, 752 GE_READ(sc, ETH_EICR)); 753 GE_TXDPRESYNC(sc, txq, txq->txq_fi); 754 GE_TXDPRESYNC(sc, txq, curtxdnum); 755 } 756 aprint_error("\n"); 757 ifp->if_oerrors++; 758 (void) gfe_whack(sc, GE_WHACK_RESTART); 759 GE_FUNC_EXIT(sc, ""); 760 } 761 762 int 763 gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio) 764 { 765 struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio]; 766 int error; 767 768 GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc"); 769 GE_DPRINTF(sc, ("(%d)", rxprio)); 770 771 error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1, 772 
	    GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	if (error) {
		GE_FUNC_EXIT(sc, "!!");
		return error;
	}

	error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS,
	    GE_RXBUF_MEMSIZE, 0);
	if (error) {
		GE_FUNC_EXIT(sc, "!!!");
		return error;
	}
	GE_FUNC_EXIT(sc, "");
	return error;
}

/*
 * Initialize one receive queue: (re)allocate its DMA memory unless
 * GE_NOFREE kept it, then build the circular descriptor ring pointing
 * into the receive buffers and record the queue's interrupt bits and
 * descriptor-pointer registers.
 */
int
gfe_rx_rxqinit(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	volatile struct gt_eth_desc *rxd;
	const bus_dma_segment_t *ds;
	int idx;
	bus_addr_t nxtaddr;
	bus_size_t boff;

	GE_FUNC_ENTER(sc, "gfe_rx_rxqinit");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	if ((sc->sc_flags & GE_NOFREE) == 0) {
		int error = gfe_rx_rxqalloc(sc, rxprio);
		if (error) {
			GE_FUNC_EXIT(sc, "!");
			return error;
		}
	} else {
		/* GE_NOFREE: memory was allocated once at attach time. */
		KASSERT(rxq->rxq_desc_mem.gdm_kva != NULL);
		KASSERT(rxq->rxq_buf_mem.gdm_kva != NULL);
	}

	memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE);

	rxq->rxq_descs =
	    (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva;
	rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr;
	rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva;
	rxq->rxq_fi = 0;
	rxq->rxq_active = GE_RXDESC_MAX;
	boff = 0;
	ds = rxq->rxq_buf_mem.gdm_map->dm_segs;
	nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd);
	/* Give every descriptor to the hardware (RX_CMD_O set). */
	for (idx = 0, rxd = rxq->rxq_descs; idx < GE_RXDESC_MAX;
	    idx++, nxtaddr += sizeof(*(++rxd))) {
		rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16);
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
		rxd->ed_bufptr = htogt32(ds->ds_addr + boff);
		/*
		 * Update the nxtptr to point to the next descriptor;
		 * the last one wraps back to the start of the ring.
		 */
		if (idx == GE_RXDESC_MAX - 1)
			nxtaddr = rxq->rxq_desc_busaddr;
		rxd->ed_nxtptr = htogt32(nxtaddr);
		boff += GE_RXBUF_SIZE;
		/* Advance into the next buffer segment when exhausted. */
		if (boff == ds->ds_len) {
			ds++;
			boff = 0;
		}
	}
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0,
	    rxq->rxq_desc_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
	    rxq->rxq_buf_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Remember which interrupt bits and registers belong to us. */
	rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError;
	switch (rxprio) {
	case GE_RXPRIO_HI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3;
		rxq->rxq_efrdp = ETH_EFRDP3;
		rxq->rxq_ecrdp = ETH_ECRDP3;
		break;
	case GE_RXPRIO_MEDHI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2;
		rxq->rxq_efrdp = ETH_EFRDP2;
		rxq->rxq_ecrdp = ETH_ECRDP2;
		break;
	case GE_RXPRIO_MEDLO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1;
		rxq->rxq_efrdp = ETH_EFRDP1;
		rxq->rxq_ecrdp = ETH_ECRDP1;
		break;
	case GE_RXPRIO_LO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0;
		rxq->rxq_efrdp = ETH_EFRDP0;
		rxq->rxq_ecrdp = ETH_ECRDP0;
		break;
	}
	GE_FUNC_EXIT(sc, "");
	return 0;
}

/*
 * Harvest received packets from one receive queue, copying them into
 * mbufs and handing them to the network stack.
 */
void
gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	struct mbuf *m = rxq->rxq_curpkt;

	GE_FUNC_ENTER(sc, "gfe_rx_get");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	while (rxq->rxq_active > 0) {
		volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi];
		struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi];
		const struct ether_header *eh;
		unsigned int cmdsts;
		size_t buflen;

		GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi);
		cmdsts = gt32toh(rxd->ed_cmdsts);
		GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts));
		rxq->rxq_cmdsts = cmdsts;
		/*
		 * Sometimes the GE "forgets" to reset the ownership bit.
		 * But if the length has been rewritten, the packet is ours
		 * so pretend the O bit is set.
		 */
		buflen = gt32toh(rxd->ed_lencnt) & 0xffff;
		if ((cmdsts & RX_CMD_O) && buflen == 0) {
			GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
			break;
		}

		/*
		 * If this is not a single buffer packet with no errors
		 * or for some reason it's bigger than our frame size,
		 * ignore it and go to the next packet.
		 * NOTE(review): this error path bumps both if_ipackets
		 * and if_ierrors — the if_ipackets++ looks like a
		 * double count; confirm intent.
		 */
		if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) !=
		    (RX_CMD_F|RX_CMD_L) ||
		    buflen > sc->sc_max_frame_length) {
			GE_DPRINTF(sc, ("!"));
			--rxq->rxq_active;
			ifp->if_ipackets++;
			ifp->if_ierrors++;
			goto give_it_back;
		}

		/* CRC is included with the packet; trim it off. */
		buflen -= ETHER_CRC_LEN;

		/* Reuse rxq_curpkt if a previous pass left one behind. */
		if (m == NULL) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		/* Offset by 2 so the IP header ends up 4-byte aligned. */
		m->m_data += 2;
		m->m_len = 0;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.rcvif = ifp;
		rxq->rxq_cmdsts = cmdsts;
		--rxq->rxq_active;

		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
		    rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);

		KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0);
		memcpy(m->m_data + m->m_len, rxb->rxb_data, buflen);
		m->m_len = buflen;
		m->m_pkthdr.len = buflen;

		ifp->if_ipackets++;
		bpf_mtap(ifp, m);

		/*
		 * Software-filter what the imperfect hash filter let
		 * through: accept in promiscuous mode, on exact hash
		 * match without error, for multicast/broadcast, or when
		 * the destination is our own address.
		 */
		eh = (const struct ether_header *) m->m_data;
		if ((ifp->if_flags & IFF_PROMISC) ||
		    (rxq->rxq_cmdsts & RX_STS_M) == 0 ||
		    (rxq->rxq_cmdsts & RX_STS_HE) ||
		    (eh->ether_dhost[0] & 1) != 0 ||
		    memcmp(eh->ether_dhost, CLLADDR(ifp->if_sadl),
			ETHER_ADDR_LEN) == 0) {
			(*ifp->if_input)(ifp, m);
			m = NULL;
			GE_DPRINTF(sc, (">"));
		} else {
			/* Rejected: recycle the mbuf for the next packet. */
			m->m_len = 0;
			m->m_pkthdr.len = 0;
			GE_DPRINTF(sc, ("+"));
		}
		rxq->rxq_cmdsts = 0;

	   give_it_back:
		/* Hand the descriptor back to the hardware. */
		rxd->ed_lencnt &= ~0xffff;	/* zero out length */
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
#if 0
		GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)",
		    rxq->rxq_fi,
		    ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1],
		    ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3]));
#endif
		GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
		if (++rxq->rxq_fi == GE_RXDESC_MAX)
			rxq->rxq_fi = 0;
		rxq->rxq_active++;
	}
	/* Keep any unused mbuf for the next invocation. */
	rxq->rxq_curpkt = m;
	GE_FUNC_EXIT(sc, "");
}

/*
 * Handle receive-side interrupt causes: drain queues that signalled
 * RxBuffer, and for RxError (ring overflow) mask the queue off and
 * schedule gfe_tick() to restart it.  Returns the updated interrupt
 * mask.
 */
uint32_t
gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue *rxq;
	uint32_t rxbits;
	/* Maps the lowest set cause bit to its queue priority. */
#define	RXPRIO_DECODER	0xffffaa50
	GE_FUNC_ENTER(sc, "gfe_rx_process");

	rxbits = ETH_IR_RxBuffer_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		GE_DPRINTF(sc, ("%1x", rxbits));
		rxbits &= ~(1 << rxprio);
		gfe_rx_get(sc, rxprio);
	}

	rxbits = ETH_IR_RxError_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		uint32_t masks[(GE_RXDESC_MAX + 31) / 32];
		int idx;
		rxbits &= ~(1 << rxprio);
		rxq = &sc->sc_rxq[rxprio];
		/* Mask this queue off until the tick handler restarts it. */
		sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits);
		intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits);
		if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) {
			sc->sc_tickflags |= GE_TICK_RX_RESTART;
			callout_reset(&sc->sc_co, 1, gfe_tick, sc);
		}
		ifp->if_ierrors++;
		GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n",
		    device_xname(sc->sc_dev), rxprio, rxq->rxq_fi));
		/* Snapshot which descriptors the hardware still owns. */
		memset(masks, 0, sizeof(masks));
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		for (idx = 0; idx < GE_RXDESC_MAX; idx++) {
			volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx];

			if (RX_CMD_O & gt32toh(rxd->ed_cmdsts))
				masks[idx/32] |= 1 << (idx & 31);
		}
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#if defined(DEBUG)
		printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
		    device_xname(sc->sc_dev), rxprio, rxq->rxq_fi,
		    rxq->rxq_cmdsts, masks[0], masks[1]);
#endif
	}
	/* If all queues are quiet, drop the summary cause bits too. */
	if ((intrmask & ETH_IR_RxBits) == 0)
		intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError);

	GE_FUNC_EXIT(sc, "");
	return intrmask;
}

/*
 * Initialize all four receive queues and, unless reception is already
 * active, point the hardware's first/current descriptor registers at
 * each ring.  Accumulates the per-queue interrupt bits in sc_intrmask.
 */
int
gfe_rx_prime(struct gfe_softc *sc)
{
	struct gfe_rxqueue *rxq;
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_prime");

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_HI);
	if (error)
		goto bail;
	rxq = &sc->sc_rxq[GE_RXPRIO_HI];
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		GE_WRITE(sc, ETH_EFRDP3, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ETH_ECRDP3, rxq->rxq_desc_busaddr);
	}
	sc->sc_intrmask |= rxq->rxq_intrbits;

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDHI);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
		GE_WRITE(sc, ETH_EFRDP2, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ETH_ECRDP2, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDLO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
		GE_WRITE(sc, ETH_EFRDP1, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ETH_ECRDP1, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_LO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_LO];
		GE_WRITE(sc, ETH_EFRDP0, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ETH_ECRDP0, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

 bail:
	GE_FUNC_EXIT(sc, "");
	return error;
}

/*
 * Release one receive queue's pending mbuf and (unless GE_NOFREE)
 * its DMA memory.
 */
void
gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue *rxq = &sc->sc_rxq[rxprio];
	GE_FUNC_ENTER(sc, "gfe_rx_cleanup");
	if (rxq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if (rxq->rxq_curpkt)
		m_freem(rxq->rxq_curpkt);
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &rxq->rxq_desc_mem);
		gfe_dmamem_free(sc, &rxq->rxq_buf_mem);
	}
	GE_FUNC_EXIT(sc, "");
}

/*
 * Stop reception: mask all receive interrupts, abort the receiver
 * DMA and busy-wait for the abort to complete, then tear down all
 * four queues.
 */
void
gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_rx_stop");
	sc->sc_flags &= ~GE_RXACTIVE;
	sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
	GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_AR);
	do {
		delay(10);
	} while (GE_READ(sc, ETH_ESDCMR) & ETH_ESDCMR_AR);
	gfe_rx_cleanup(sc, GE_RXPRIO_HI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO);
	gfe_rx_cleanup(sc, GE_RXPRIO_LO);
	GE_FUNC_EXIT(sc, "");
}

/*
 * Callout handler: perform work deferred from interrupt context —
 * restart transmission and/or re-enable receive queues that were
 * masked off after an RxError.
 */
void
gfe_tick(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t intrmask;
	unsigned int tickflags;
	int s;

	GE_FUNC_ENTER(sc, "gfe_tick");

	s = splnet();

	tickflags = sc->sc_tickflags;
	sc->sc_tickflags = 0;
	intrmask = sc->sc_intrmask;
	if (tickflags & GE_TICK_TX_IFSTART)
		gfe_ifstart(&sc->sc_ec.ec_if);
	if (tickflags & GE_TICK_RX_RESTART) {
		intrmask |= sc->sc_idlemask;
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) {
			struct
gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, ETH_EFRDP3, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ETH_ECRDP3, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, ETH_EFRDP2, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ETH_ECRDP2, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, ETH_EFRDP1, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ETH_ECRDP1, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, ETH_EFRDP0, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ETH_ECRDP0, rxq->rxq_desc_busaddr);
		}
		sc->sc_idlemask = 0;
	}
	/* Only touch the interrupt mask register when it actually changed. */
	if (intrmask != sc->sc_intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
	}
	gfe_intr(sc);
	splx(s);

	GE_FUNC_EXIT(sc, "");
}

/*
 * gfe_tx_enqueue:
 *
 *	Move one packet from the txq's pending queue into the transmit
 *	ring/buffer for the given priority and kick the SDMA engine.
 *	Packet data is copied into a contiguous bounce buffer
 *	(txq_buf_mem) rounded up to a cache line, since the GT fetches
 *	from that buffer rather than from mbuf chains directly.
 *
 *	Returns 1 if a packet was enqueued, 0 if there was nothing to do
 *	or no ring/buffer space was available (callers loop until 0).
 */
int
gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo];
	uint32_t intrmask = sc->sc_intrmask;
	size_t buflen;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_tx_enqueue");

	/*
	 * Anything in the pending queue to enqueue?  if not, punt. Likewise
	 * if the txq is not yet created.
	 * otherwise grab its dmamap.
	 */
	if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) {
		GE_FUNC_EXIT(sc, "-");
		return 0;
	}

	/*
	 * Have we [over]consumed our limit of descriptors?
	 * Do we have enough free descriptors?
	 */
	if (GE_TXDESC_MAX == txq->txq_nactive + 2) {
		volatile struct gt_eth_desc * const txd2 = &txq->txq_descs[txq->txq_fi];
		uint32_t cmdsts;
		size_t pktlen;
		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		cmdsts = gt32toh(txd2->ed_cmdsts);
		if (cmdsts & TX_CMD_O) {
			int nextin;
			/*
			 * Sometime the Discovery forgets to update the
			 * last descriptor.  See if we own the descriptor
			 * after it (since we know we've turned that to
			 * the discovery and if we owned it, the Discovery
			 * gave it back).  If we do, we know the Discovery
			 * gave back this one but forgot to mark it as ours.
			 */
			nextin = txq->txq_fi + 1;
			if (nextin == GE_TXDESC_MAX)
				nextin = 0;
			GE_TXDPOSTSYNC(sc, txq, nextin);
			if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) {
				/* Both still GT-owned: genuinely full. */
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_TXDPRESYNC(sc, txq, nextin);
				GE_FUNC_EXIT(sc, "@");
				return 0;
			}
#ifdef DEBUG
			printf("%s: txenqueue: transmitter resynced at %d\n",
			    device_xname(sc->sc_dev), txq->txq_fi);
#endif
		}
		/* Retire the completed descriptor at txq_fi and reclaim its
		 * slice of the bounce buffer. */
		if (++txq->txq_fi == GE_TXDESC_MAX)
			txq->txq_fi = 0;
		txq->txq_inptr = gt32toh(txd2->ed_bufptr) - txq->txq_buf_busaddr;
		pktlen = (gt32toh(txd2->ed_lencnt) >> 16) & 0xffff;
		txq->txq_inptr += roundup(pktlen, dcache_line_size);
		txq->txq_nactive--;

		/* statistics */
		ifp->if_opackets++;
		if (cmdsts & TX_STS_ES)
			ifp->if_oerrors++;
		GE_DPRINTF(sc, ("%%"));
	}

	buflen = roundup(m->m_pkthdr.len, dcache_line_size);

	/*
	 * If this packet would wrap around the end of the buffer, reset back
	 * to the beginning.
	 */
	if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) {
		txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr;
		txq->txq_outptr = 0;
	}

	/*
	 * Make sure the output packet doesn't run over the beginning of
	 * what we've already given the GT.
	 */
	if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr &&
	    txq->txq_outptr + buflen > txq->txq_inptr) {
		/* No room: ask for a TX-buffer interrupt so we get woken
		 * when space frees up. */
		intrmask |= txq->txq_intrbits &
		    (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow);
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
		}
		GE_FUNC_EXIT(sc, "#");
		return 0;
	}

	/*
	 * The end-of-list descriptor we put on last time is the starting point
	 * for this packet.  The GT is supposed to terminate list processing on
	 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor
	 * must terminate the list.
	 */
	intrmask = sc->sc_intrmask;

	m_copydata(m, 0, m->m_pkthdr.len,
	    (char *)txq->txq_buf_mem.gdm_kva + (int)txq->txq_outptr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
	    txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE);
	txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr);
	txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16);
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	/*
	 * Request a buffer interrupt every 2/3 of the way thru the transmit
	 * buffer.
	 */
	txq->txq_ei_gapcount += buflen;
	if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI);
		txq->txq_ei_gapcount = 0;
	} else {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST);
	}
#if 0
	GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
	    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
	    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	txq->txq_outptr += buflen;
	/*
	 * Tell the SDMA engine to "Fetch!"
	 */
	GE_WRITE(sc, ETH_ESDCMR,
	    txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));

	GE_DPRINTF(sc, ("(%d)", txq->txq_lo));

	/*
	 * Update the last out appropriately.
	 */
	txq->txq_nactive++;
	if (++txq->txq_lo == GE_TXDESC_MAX)
		txq->txq_lo = 0;

	/*
	 * Move mbuf from the pending queue to the snd queue.
	 */
	IF_DEQUEUE(&txq->txq_pendq, m);
	bpf_mtap(ifp, m);
	m_freem(m);
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Since we have put an item into the packet queue, we now want
	 * an interrupt when the transmit queue finishes processing the
	 * list.  But only update the mask if needs changing.
	 */
	intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
	if (sc->sc_intrmask != intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
	}
	if (ifp->if_timer == 0)
		ifp->if_timer = 5;
	GE_FUNC_EXIT(sc, "*");
	return 1;
}

/*
 * gfe_tx_done:
 *
 *	Reclaim completed transmit descriptors for the given priority
 *	queue, updating if_opackets/if_oerrors and the bounce-buffer
 *	read pointer (txq_inptr).  Returns the (possibly reduced)
 *	interrupt mask: when the queue drains completely the TxEnd/
 *	TxBuffer bits for this queue are cleared from it.
 */
uint32_t
gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	struct ifnet * const ifp = &sc->sc_ec.ec_if;

	GE_FUNC_ENTER(sc, "gfe_tx_done");

	if (txq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return intrmask;
	}

	while (txq->txq_nactive > 0) {
		const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
		volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi];
		uint32_t cmdsts;
		size_t pktlen;

		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) {
			int nextin;

			/* With only one outstanding descriptor there is no
			 * "next" one to cross-check; it is genuinely still
			 * in flight. */
			if (txq->txq_nactive == 1) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_FUNC_EXIT(sc, "");
				return intrmask;
			}
			/*
			 * Sometimes the Discovery forgets to update the
			 * ownership bit in the descriptor.  See if we own the
			 * descriptor after it (since we know we've turned
			 * that to the Discovery and if we own it now then the
			 * Discovery gave it back).  If we do, we know the
			 * Discovery gave back this one but forgot to mark it
			 * as ours.
			 */
			nextin = txq->txq_fi + 1;
			if (nextin == GE_TXDESC_MAX)
				nextin = 0;
			GE_TXDPOSTSYNC(sc, txq, nextin);
			if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_TXDPRESYNC(sc, txq, nextin);
				GE_FUNC_EXIT(sc, "");
				return intrmask;
			}
#ifdef DEBUG
			printf("%s: txdone: transmitter resynced at %d\n",
			    device_xname(sc->sc_dev), txq->txq_fi);
#endif
		}
#if 0
		GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)",
		    txq->txq_lo,
		    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
		    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
		GE_DPRINTF(sc, ("(%d)", txq->txq_fi));
		/* Retire this descriptor and advance the read pointer past
		 * the cache-line-rounded packet it described. */
		if (++txq->txq_fi == GE_TXDESC_MAX)
			txq->txq_fi = 0;
		txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr;
		pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff;
		bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
		    txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE);
		txq->txq_inptr += roundup(pktlen, dcache_line_size);

		/* statistics */
		ifp->if_opackets++;
		if (cmdsts & TX_STS_ES)
			ifp->if_oerrors++;

		/* txd->ed_bufptr = 0; */

		/* Re-arm the watchdog while work is still completing. */
		ifp->if_timer = 5;
		--txq->txq_nactive;
	}
	/* NOTE(review): the while loop above only exits when txq_nactive
	 * reaches 0, so this panic is unreachable; kept from the original
	 * as a belt-and-braces invariant check. */
	if (txq->txq_nactive != 0)
		panic("%s: transmit fifo%d empty but active count (%d) > 0!",
		    device_xname(sc->sc_dev), txprio, txq->txq_nactive);
	ifp->if_timer = 0;
	intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow));
	intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow));
	GE_FUNC_EXIT(sc, "");
	return intrmask;
}

/*
 * gfe_tx_txqalloc:
 *
 *	Allocate the DMA memory (descriptor ring + bounce buffer) for one
 *	TX priority queue.  On failure the partially-allocated descriptor
 *	memory is freed before returning the error.
 */
int
gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	int error;

	GE_FUNC_ENTER(sc, "gfe_tx_txqalloc");

	error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1,
	    GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	if (error) {
		GE_FUNC_EXIT(sc, "");
		return error;
	}
	error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0);
	if (error) {
		gfe_dmamem_free(sc, &txq->txq_desc_mem);
		GE_FUNC_EXIT(sc, "");
		return error;
	}
	GE_FUNC_EXIT(sc, "");
	return 0;
}

/*
 * gfe_tx_start:
 *
 *	(Re)initialize one TX priority queue: allocate its DMA memory
 *	unless GE_NOFREE says it is kept across stops, reset all ring
 *	indices/pointers, link the descriptor ring circularly, select the
 *	per-priority interrupt/command/status bits, and point the GT's
 *	current-TX-descriptor register at the ring.  Finally re-enqueues
 *	any packets left pending from before a restart.
 */
int
gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	volatile struct gt_eth_desc *txd;
	unsigned int i;
	bus_addr_t addr;

	GE_FUNC_ENTER(sc, "gfe_tx_start");

	sc->sc_intrmask &=
	    ~(ETH_IR_TxEndHigh |
	      ETH_IR_TxBufferHigh |
	      ETH_IR_TxEndLow |
	      ETH_IR_TxBufferLow);

	if (sc->sc_flags & GE_NOFREE) {
		KASSERT(txq->txq_desc_mem.gdm_kva != NULL);
		KASSERT(txq->txq_buf_mem.gdm_kva != NULL);
	} else {
		int error = gfe_tx_txqalloc(sc, txprio);
		if (error) {
			GE_FUNC_EXIT(sc, "!");
			return error;
		}
	}

	txq->txq_descs =
	    (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva;
	txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr;
	txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr;

	txq->txq_pendq.ifq_maxlen = 10;
	txq->txq_ei_gapcount = 0;
	txq->txq_nactive = 0;
	txq->txq_fi = 0;
	txq->txq_lo = 0;
	/* Empty ring: read pointer parked at the end, write pointer at 0. */
	txq->txq_inptr = GE_TXBUF_SIZE;
	txq->txq_outptr = 0;
	for (i = 0, txd = txq->txq_descs,
	    addr = txq->txq_desc_busaddr + sizeof(*txd);
	    i < GE_TXDESC_MAX - 1; i++, txd++, addr += sizeof(*txd)) {
		/*
		 * update the nxtptr to point to the next txd.
		 */
		txd->ed_cmdsts = 0;
		txd->ed_nxtptr = htogt32(addr);
	}
	/* Close the ring: last descriptor points back at the first. */
	txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr =
	    htogt32(txq->txq_desc_busaddr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0,
	    GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	switch (txprio) {
	case GE_TXPRIO_HI:
		txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh;
		txq->txq_esdcmrbits = ETH_ESDCMR_TXDH;
		txq->txq_epsrbits = ETH_EPSR_TxHigh;
		txq->txq_ectdp = ETH_ECTDP1;
		GE_WRITE(sc, ETH_ECTDP1, txq->txq_desc_busaddr);
		break;

	case GE_TXPRIO_LO:
		txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow;
		txq->txq_esdcmrbits = ETH_ESDCMR_TXDL;
		txq->txq_epsrbits = ETH_EPSR_TxLow;
		txq->txq_ectdp = ETH_ECTDP0;
		GE_WRITE(sc, ETH_ECTDP0, txq->txq_desc_busaddr);
		break;

	case GE_TXPRIO_NONE:
		break;
	}
#if 0
	GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp));
	GE_WRITE(sc->sc_dev, txq->txq_ectdp, txq->txq_desc_busaddr);
	GE_DPRINTF(sc, (")"));
#endif

	/*
	 * If we are restarting, there may be packets in the pending queue
	 * waiting to be enqueued.  Try enqueuing packets from both priority
	 * queues until the pending queue is empty or there no room for them
	 * on the device.
	 */
	while (gfe_tx_enqueue(sc, txprio))
		continue;

	GE_FUNC_EXIT(sc, "");
	return 0;
}

/*
 * gfe_tx_cleanup:
 *
 *	Release one TX priority queue's DMA memory, but only when 'flush'
 *	is set and the memory is not marked persistent (GE_NOFREE).
 */
void
gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];

	GE_FUNC_ENTER(sc, "gfe_tx_cleanup");
	/* NOTE(review): txq is the address of an array element and can
	 * never be NULL; this guard (kept from the original) is dead. */
	if (txq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if (!flush) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &txq->txq_desc_mem);
		gfe_dmamem_free(sc, &txq->txq_buf_mem);
	}
	GE_FUNC_EXIT(sc, "-F");
}

/*
 * gfe_tx_stop:
 *
 *	Stop both transmit queues: command the SDMA engine to stop,
 *	reap whatever already completed, mask all TX interrupt bits,
 *	and (when op == GE_WHACK_STOP) free the queues' DMA memory.
 */
void
gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_tx_stop");

	GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);

	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
	sc->sc_intrmask &=
	    ~(ETH_IR_TxEndHigh |
	      ETH_IR_TxBufferHigh |
	      ETH_IR_TxEndLow |
	      ETH_IR_TxBufferLow);

	gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP);
	gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP);

	sc->sc_ec.ec_if.if_timer = 0;
	GE_FUNC_EXIT(sc, "");
}

/*
 * gfe_intr:
 *
 *	Interrupt handler.  Loops up to four times reading and acking the
 *	interrupt cause register, dispatching RX processing, TX reaping,
 *	and PHY status-change flagging; afterwards drains any packets
 *	waiting in both TX pending queues.  Returns nonzero iff at least
 *	one pass found a cause bit set (interrupt claimed).
 */
int
gfe_intr(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t cause;
	uint32_t intrmask = sc->sc_intrmask;
	int claim = 0;
	int cnt;

	GE_FUNC_ENTER(sc, "gfe_intr");

	for (cnt = 0; cnt < 4; cnt++) {
		/* Propagate any mask change made by the helpers below. */
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
		}
		cause = GE_READ(sc, ETH_EICR);
		cause &= sc->sc_intrmask;
		GE_DPRINTF(sc, (".%#x", cause));
		if (cause == 0)
			break;

		claim = 1;

		/* Ack exactly the bits we saw. */
		GE_WRITE(sc, ETH_EICR, ~cause);
#ifndef GE_NORX
		if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
			intrmask =
			    gfe_rx_process(sc, cause, intrmask);
#endif

#ifndef GE_NOTX
		if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
		if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
#endif
		if (cause & ETH_IR_MIIPhySTC) {
			sc->sc_flags |= GE_PHYSTSCHG;
			/* intrmask &= ~ETH_IR_MIIPhySTC; */
		}
	}

	/* Push out anything that queued up while we were busy. */
	while (gfe_tx_enqueue(sc, GE_TXPRIO_HI))
		continue;
	while (gfe_tx_enqueue(sc, GE_TXPRIO_LO))
		continue;

	GE_FUNC_EXIT(sc, "");
	return claim;
}

/*
 * gfe_whack:
 *
 *	Central state machine for bringing the interface up/down.
 *	GE_WHACK_START: allocate/fill the hash table, prime RX, start TX,
 *	enable the port and interrupts, then fall through to...
 *	GE_WHACK_CHANGE: rewrite port-config and interrupt-mask registers
 *	and kick the transmit path; returns here on success.
 *	GE_WHACK_RESTART: stop TX first, then behaves like START minus
 *	the RX/hash refill.  GE_WHACK_STOP (and any error in the cases
 *	above via 'break') falls out to the shutdown path, which disables
 *	the port, stops TX/RX and frees the hash table.
 */
int
gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op)
{
	int error = 0;
	GE_FUNC_ENTER(sc, "gfe_whack");

	switch (op) {
	case GE_WHACK_RESTART:
#ifndef GE_NOTX
		gfe_tx_stop(sc, op);
#endif
		/* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */
		/* FALLTHROUGH */
	case GE_WHACK_START:
#ifndef GE_NOHASH
		if (error == 0 && sc->sc_hashtable == NULL) {
			error = gfe_hash_alloc(sc);
			if (error)
				break;
		}
		/* NOTE(review): gfe_hash_fill()'s return value is ignored
		 * here — verify that a fill failure is acceptable. */
		if (op != GE_WHACK_RESTART)
			gfe_hash_fill(sc);
#endif
#ifndef GE_NORX
		if (op != GE_WHACK_RESTART) {
			error = gfe_rx_prime(sc);
			if (error)
				break;
		}
#endif
#ifndef GE_NOTX
		error = gfe_tx_start(sc, GE_TXPRIO_HI);
		if (error)
			break;
#endif
		sc->sc_ec.ec_if.if_flags |= IFF_RUNNING;
		GE_WRITE(sc, ETH_EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, ETH_EPCXR, sc->sc_pcxr);
		GE_WRITE(sc, ETH_EICR, 0);
		GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
#ifndef GE_NOHASH
		GE_WRITE(sc, ETH_EHTPR,
		    sc->sc_hash_mem.gdm_map->dm_segs->ds_addr);
#endif
#ifndef GE_NORX
		GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_ERD);
		sc->sc_flags |= GE_RXACTIVE;
#endif
		/* FALLTHROUGH */
	case GE_WHACK_CHANGE:
		GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)",
		    GE_READ(sc, ETH_EPCR), GE_READ(sc, ETH_EIMR)));
		GE_WRITE(sc, ETH_EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
		gfe_ifstart(&sc->sc_ec.ec_if);
		GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)",
		    GE_READ(sc, ETH_ECTDP0), GE_READ(sc, ETH_ECTDP1)));
		GE_FUNC_EXIT(sc, "");
		return error;
	case GE_WHACK_STOP:
		break;
	}

#ifdef GE_DEBUG
	if (error)
		GE_DPRINTF(sc, (" failed: %d\n", error));
#endif
	/* Shutdown path: disable the port, mask everything, stop queues. */
	GE_WRITE(sc, ETH_EPCR, sc->sc_pcr);
	GE_WRITE(sc, ETH_EIMR, 0);
	sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING;
#ifndef GE_NOTX
	gfe_tx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NORX
	gfe_rx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NOHASH
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &sc->sc_hash_mem);
		sc->sc_hashtable = NULL;
	}
#endif

	GE_FUNC_EXIT(sc, "");
	return error;
}

/*
 * gfe_hash_compute:
 *
 *	Compute the GT's hash-table index for an Ethernet address.
 *	Each address byte is bit-reversed (nibble-wise per the manual's
 *	description) into add0/add1, then one of two hash functions is
 *	applied depending on ETH_EPCR_HM.  The result is masked to 11 or
 *	15 bits depending on the configured table size (ETH_EPCR_HS_512).
 */
int
gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint32_t w0, add0, add1;
	uint32_t result;

	GE_FUNC_ENTER(sc, "gfe_hash_compute");
	add0 = ((uint32_t) eaddr[5] << 0) |
	    ((uint32_t) eaddr[4] << 8) |
	    ((uint32_t) eaddr[3] << 16);

	/* Reverse the bit order within each byte of add0. */
	add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4);
	add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2);
	add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1);

	add1 = ((uint32_t) eaddr[2] << 0) |
	    ((uint32_t) eaddr[1] << 8) |
	    ((uint32_t) eaddr[0] << 16);

	/* Reverse the bit order within each byte of add1. */
	add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4);
	add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2);
	add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1);

	GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr)));
	/*
	 * hashResult is the 15 bits Hash entry address.
	 * ethernetADD is a 48 bit number, which is derived from the Ethernet
	 * MAC address, by nibble swapping in every byte (i.e MAC address
	 * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb).
	 */

	if ((sc->sc_pcr & ETH_EPCR_HM) == 0) {
		/*
		 * hashResult[14:0] = hashFunc0(ethernetADD[47:0])
		 *
		 * hashFunc0 calculates the hashResult in the following manner:
		 *   hashResult[ 8:0] = ethernetADD[14:8,1,0]
		 *	XOR ethernetADD[23:15] XOR ethernetADD[32:24]
		 */
		result = (add0 & 3) | ((add0 >> 6) & ~3);
		result ^= (add0 >> 15) ^ (add1 >> 0);
		result &= 0x1ff;
		/*
		 *   hashResult[14:9] = ethernetADD[7:2]
		 */
		result |= (add0 & ~3) << 7;	/* excess bits will be masked */
		GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff));
	} else {
#define	TRIBITFLIP	073516240	/* yes its in octal */
		/*
		 * hashResult[14:0] = hashFunc1(ethernetADD[47:0])
		 *
		 * hashFunc1 calculates the hashResult in the following manner:
		 *   hashResult[08:00] = ethernetADD[06:14]
		 *	XOR ethernetADD[15:23] XOR ethernetADD[24:32]
		 */
		w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff;
		/*
		 * Now bitswap those 9 bits
		 */
		result = 0;
		result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6;
		result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3;
		result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0;

		/*
		 *   hashResult[14:09] = ethernetADD[00:05]
		 */
		result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12;
		result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9;
		GE_DPRINTF(sc, ("1(%#x)", result));
	}
	GE_FUNC_EXIT(sc, "");
	/* Mask to the configured table size: 2K or 32K entries. */
	return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff);
}

/*
 * gfe_hash_entry_op:
 *
 *	Add or remove (GE_HASH_ADD / GE_HASH_REMOVE) a unicast/multicast
 *	address in the GT's in-memory hash table, probing linearly up to
 *	HSH_LIMIT slots the way the hardware does.  Returns 0 on success,
 *	EBUSY if the op was already done, ENOENT if there is nothing to
 *	remove, ENOSPC if no slot is free.
 */
int
gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op,
	enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint64_t he;
	uint64_t *maybe_he_p = NULL;
	int limit;
	int hash;
	int maybe_hash = 0;

	GE_FUNC_ENTER(sc, "gfe_hash_entry_op");

	hash = gfe_hash_compute(sc, eaddr);

	if (sc->sc_hashtable == NULL) {
		panic("%s:%d: hashtable == NULL!", device_xname(sc->sc_dev),
		    __LINE__);
	}

	/*
	 * Assume we are going to insert so create the hash entry we
	 * are going to insert.  We also use it to match entries we
	 * will be removing.
	 */
	he = ((uint64_t) eaddr[5] << 43) |
	    ((uint64_t) eaddr[4] << 35) |
	    ((uint64_t) eaddr[3] << 27) |
	    ((uint64_t) eaddr[2] << 19) |
	    ((uint64_t) eaddr[1] << 11) |
	    ((uint64_t) eaddr[0] << 3) |
	    HSH_PRIO_INS(prio) | HSH_V | HSH_R;

	/*
	 * The GT will search upto 12 entries for a hit, so we must mimic that.
	 */
	hash &= sc->sc_hashmask / sizeof(he);
	for (limit = HSH_LIMIT; limit > 0 ; --limit) {
		/*
		 * Does the GT wrap at the end, stop at the, or overrun the
		 * end?  Assume it wraps for now.  Stash a copy of the
		 * current hash entry.
		 */
		uint64_t *he_p = &sc->sc_hashtable[hash];
		uint64_t thishe = *he_p;

		/*
		 * If the hash entry isn't valid, that break the chain.  And
		 * this entry a good candidate for reuse.
		 */
		if ((thishe & HSH_V) == 0) {
			maybe_he_p = he_p;
			break;
		}

		/*
		 * If the hash entry has the same address we are looking for
		 * then ...  if we are removing and the skip bit is set, its
		 * already been removed.  if are adding and the skip bit is
		 * clear, then its already added.  In either return EBUSY
		 * indicating the op has already been done.  Otherwise flip
		 * the skip bit and return 0.
		 */
		if (((he ^ thishe) & HSH_ADDR_MASK) == 0) {
			if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) ||
			    ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0))
				/* NOTE(review): this is the only exit path in
				 * the function without a GE_FUNC_EXIT() call —
				 * verify whether that trace imbalance is
				 * intentional. */
				return EBUSY;
			*he_p = thishe ^ HSH_S;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
			    hash * sizeof(he), sizeof(he),
			    BUS_DMASYNC_PREWRITE);
			GE_FUNC_EXIT(sc, "^");
			return 0;
		}

		/*
		 * If we haven't found a slot for the entry and this entry
		 * is currently being skipped, return this entry.
		 */
		if (maybe_he_p == NULL && (thishe & HSH_S)) {
			maybe_he_p = he_p;
			maybe_hash = hash;
		}

		/* Linear probe to the next slot, wrapping at table end. */
		hash = (hash + 1) & (sc->sc_hashmask / sizeof(he));
	}

	/*
	 * If we got here, then there was no entry to remove.
	 */
	if (op == GE_HASH_REMOVE) {
		GE_FUNC_EXIT(sc, "?");
		return ENOENT;
	}

	/*
	 * If we couldn't find a slot, return an error.
	 */
	if (maybe_he_p == NULL) {
		GE_FUNC_EXIT(sc, "!");
		return ENOSPC;
	}

	/* Update the entry.
	 */
	*maybe_he_p = he;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
	    maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE);
	GE_FUNC_EXIT(sc, "+");
	return 0;
}

/*
 * gfe_hash_multichg:
 *
 *	Ethernet multicast-list change callback.  For range (wildcard)
 *	requests it falls back to promiscuous-multicast mode (or asks the
 *	caller to reinitialize via ENETRESET); for single addresses it
 *	adds/removes the entry in the hash table, regenerating the table
 *	(ENETRESET) when it fills up.
 */
int
gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm,
	u_long cmd)
{
	struct gfe_softc *sc = ec->ec_if.if_softc;
	int error;
	enum gfe_hash_op op;
	enum gfe_rxprio prio;

	GE_FUNC_ENTER(sc, "hash_multichg");
	/*
	 * Is this a wildcard entry?  If so and its being removed, recompute.
	 */
	if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
		if (cmd == SIOCDELMULTI) {
			GE_FUNC_EXIT(sc, "");
			return ENETRESET;
		}

		/*
		 * Switch in
		 */
		sc->sc_flags |= GE_ALLMULTI;
		if ((sc->sc_pcr & ETH_EPCR_PM) == 0) {
			sc->sc_pcr |= ETH_EPCR_PM;
			GE_WRITE(sc, ETH_EPCR, sc->sc_pcr);
			GE_FUNC_EXIT(sc, "");
			return 0;
		}
		GE_FUNC_EXIT(sc, "");
		return ENETRESET;
	}

	prio = GE_RXPRIO_MEDLO;
	op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD);

	/* Table not allocated yet; nothing to update. */
	if (sc->sc_hashtable == NULL) {
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo);
	if (error == EBUSY) {
		aprint_error_dev(sc->sc_dev, "multichg: tried to %s %s again\n",
		    cmd == SIOCDELMULTI ? "remove" : "add",
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	if (error == ENOENT) {
		aprint_error_dev(sc->sc_dev,
		    "multichg: failed to remove %s: not in table\n",
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	if (error == ENOSPC) {
		aprint_error_dev(sc->sc_dev, "multichg:"
		    " failed to add %s: no space; regenerating table\n",
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return ENETRESET;
	}
	GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n",
	    device_xname(sc->sc_dev),
	    cmd == SIOCDELMULTI ?
"remove" : "add", 2010 ether_sprintf(enm->enm_addrlo))); 2011 GE_FUNC_EXIT(sc, ""); 2012 return 0; 2013 } 2014 2015 int 2016 gfe_hash_fill(struct gfe_softc *sc) 2017 { 2018 struct ether_multistep step; 2019 struct ether_multi *enm; 2020 int error; 2021 2022 GE_FUNC_ENTER(sc, "gfe_hash_fill"); 2023 2024 error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI, 2025 CLLADDR(sc->sc_ec.ec_if.if_sadl)); 2026 if (error) 2027 GE_FUNC_EXIT(sc, "!"); 2028 return error; 2029 2030 sc->sc_flags &= ~GE_ALLMULTI; 2031 if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0) 2032 sc->sc_pcr &= ~ETH_EPCR_PM; 2033 ETHER_FIRST_MULTI(step, &sc->sc_ec, enm); 2034 while (enm != NULL) { 2035 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2036 sc->sc_flags |= GE_ALLMULTI; 2037 sc->sc_pcr |= ETH_EPCR_PM; 2038 } else { 2039 error = gfe_hash_entry_op(sc, GE_HASH_ADD, 2040 GE_RXPRIO_MEDLO, enm->enm_addrlo); 2041 if (error == ENOSPC) 2042 break; 2043 } 2044 ETHER_NEXT_MULTI(step, enm); 2045 } 2046 2047 GE_FUNC_EXIT(sc, ""); 2048 return error; 2049 } 2050 2051 int 2052 gfe_hash_alloc(struct gfe_softc *sc) 2053 { 2054 int error; 2055 GE_FUNC_ENTER(sc, "gfe_hash_alloc"); 2056 sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1; 2057 error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1, 2058 BUS_DMA_NOCACHE); 2059 if (error) { 2060 aprint_error_dev(sc->sc_dev, 2061 "failed to allocate %d bytes for hash table: %d\n", 2062 sc->sc_hashmask + 1, error); 2063 GE_FUNC_EXIT(sc, ""); 2064 return error; 2065 } 2066 sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva; 2067 memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1); 2068 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 2069 0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE); 2070 GE_FUNC_EXIT(sc, ""); 2071 return 0; 2072 } 2073