1 /*- 2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $ 28 */ 29 30 #include "opt_ifpoll.h" 31 #include "opt_jme.h" 32 33 #include <sys/param.h> 34 #include <sys/endian.h> 35 #include <sys/kernel.h> 36 #include <sys/bus.h> 37 #include <sys/interrupt.h> 38 #include <sys/malloc.h> 39 #include <sys/proc.h> 40 #include <sys/rman.h> 41 #include <sys/serialize.h> 42 #include <sys/serialize2.h> 43 #include <sys/socket.h> 44 #include <sys/sockio.h> 45 #include <sys/sysctl.h> 46 47 #include <net/ethernet.h> 48 #include <net/if.h> 49 #include <net/bpf.h> 50 #include <net/if_arp.h> 51 #include <net/if_dl.h> 52 #include <net/if_media.h> 53 #include <net/if_poll.h> 54 #include <net/ifq_var.h> 55 #include <net/toeplitz.h> 56 #include <net/toeplitz2.h> 57 #include <net/vlan/if_vlan_var.h> 58 #include <net/vlan/if_vlan_ether.h> 59 60 #include <netinet/ip.h> 61 #include <netinet/tcp.h> 62 63 #include <dev/netif/mii_layer/mii.h> 64 #include <dev/netif/mii_layer/miivar.h> 65 #include <dev/netif/mii_layer/jmphyreg.h> 66 67 #include <bus/pci/pcireg.h> 68 #include <bus/pci/pcivar.h> 69 #include "pcidevs.h" 70 71 #include <dev/netif/jme/if_jmereg.h> 72 #include <dev/netif/jme/if_jmevar.h> 73 74 #include "miibus_if.h" 75 76 #define JME_TICK_CPUID 0 /* DO NOT CHANGE THIS */ 77 78 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 79 80 #ifdef JME_RSS_DEBUG 81 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \ 82 do { \ 83 if ((sc)->jme_rss_debug >= (lvl)) \ 84 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \ 85 } while (0) 86 #else /* !JME_RSS_DEBUG */ 87 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 88 #endif /* JME_RSS_DEBUG */ 89 90 static int jme_probe(device_t); 91 static int jme_attach(device_t); 92 static int jme_detach(device_t); 93 static int jme_shutdown(device_t); 94 static int jme_suspend(device_t); 95 static int jme_resume(device_t); 96 97 static int jme_miibus_readreg(device_t, int, int); 98 static int jme_miibus_writereg(device_t, int, int, int); 99 static void jme_miibus_statchg(device_t); 100 101 static void jme_init(void *); 102 static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 103 static void jme_start(struct ifnet *, struct ifaltq_subque *); 104 static void jme_watchdog(struct ifnet *); 105 static void jme_mediastatus(struct ifnet *, struct ifmediareq *); 106 static int jme_mediachange(struct ifnet *); 107 #ifdef IFPOLL_ENABLE 108 static void jme_npoll(struct ifnet *, struct ifpoll_info *); 109 static void jme_npoll_status(struct ifnet *); 110 static void jme_npoll_rx(struct ifnet *, void *, int); 111 static void jme_npoll_tx(struct ifnet *, void *, int); 112 #endif 113 static void jme_serialize(struct ifnet *, enum ifnet_serialize); 114 static void jme_deserialize(struct ifnet *, enum ifnet_serialize); 115 static int jme_tryserialize(struct ifnet *, enum ifnet_serialize); 116 #ifdef INVARIANTS 117 static void jme_serialize_assert(struct ifnet *, enum ifnet_serialize, 118 boolean_t); 119 #endif 120 121 static void jme_intr(void *); 122 static void jme_msix_tx(void *); 123 static void jme_msix_rx(void *); 124 static void jme_msix_status(void *); 125 static void jme_txeof(struct jme_txdata *); 126 static void jme_rxeof(struct jme_rxdata *, int, int); 127 static void jme_rx_intr(struct jme_softc *, uint32_t); 128 static void jme_enable_intr(struct jme_softc *); 129 static void jme_disable_intr(struct jme_softc *); 130 static void jme_rx_restart(struct jme_softc *, uint32_t); 131 132 static int jme_msix_setup(device_t); 133 static void jme_msix_teardown(device_t, int); 134 static int jme_intr_setup(device_t); 135 static void jme_intr_teardown(device_t); 136 static void jme_msix_try_alloc(device_t); 137 static void jme_msix_free(device_t); 138 static int jme_intr_alloc(device_t); 139 static void jme_intr_free(device_t); 140 static int jme_dma_alloc(struct jme_softc *); 141 static void jme_dma_free(struct jme_softc *); 142 static int jme_init_rx_ring(struct jme_rxdata *); 143 static void jme_init_tx_ring(struct jme_txdata *); 144 static void jme_init_ssb(struct jme_softc *); 145 static int jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int); 146 static int jme_encap(struct jme_txdata *, struct mbuf **, int *); 147 static void jme_rxpkt(struct jme_rxdata *, int); 148 static int jme_rxring_dma_alloc(struct jme_rxdata *); 149 static int jme_rxbuf_dma_alloc(struct jme_rxdata *); 150 static int jme_rxbuf_dma_filter(void *, bus_addr_t); 151 152 static void jme_tick(void *); 153 static void jme_stop(struct jme_softc *); 154 static void jme_reset(struct jme_softc *); 155 static void jme_set_msinum(struct jme_softc *); 156 static void jme_set_vlan(struct jme_softc *); 157 static void jme_set_filter(struct jme_softc *); 158 static void jme_stop_tx(struct jme_softc *); 159 static void jme_stop_rx(struct jme_softc *); 160 static void jme_mac_config(struct jme_softc *); 161 static void jme_reg_macaddr(struct jme_softc *, uint8_t[]); 162 static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]); 163 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *); 164 #ifdef notyet 165 static void jme_setwol(struct jme_softc *); 166 static void jme_setlinkspeed(struct jme_softc *); 167 #endif 168 static void jme_set_tx_coal(struct jme_softc *); 169 static void jme_set_rx_coal(struct jme_softc *); 170 static void jme_enable_rss(struct jme_softc *); 171 static void jme_disable_rss(struct jme_softc *); 172 static void jme_serialize_skipmain(struct jme_softc *); 173 static void jme_deserialize_skipmain(struct jme_softc *); 174 static void jme_phy_poweron(struct jme_softc *); 175 static void jme_phy_poweroff(struct jme_softc *); 176 static int jme_miiext_read(struct jme_softc *, int); 177 static void jme_miiext_write(struct jme_softc *, int, int); 178 static void jme_phy_init(struct jme_softc *); 179 180 static void jme_sysctl_node(struct jme_softc *); 181 static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS); 182 static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS); 183 static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS); 184 static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS); 185 #ifdef IFPOLL_ENABLE 186 static int jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS); 187 static int jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS); 188 #endif 189 190 /* 191 * Devices supported by this driver. 192 */ 193 static const struct jme_dev { 194 uint16_t jme_vendorid; 195 uint16_t jme_deviceid; 196 uint32_t jme_caps; 197 const char *jme_name; 198 } jme_devs[] = { 199 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250, 200 JME_CAP_JUMBO, 201 "JMicron Inc, JMC250 Gigabit Ethernet" }, 202 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260, 203 JME_CAP_FASTETH, 204 "JMicron Inc, JMC260 Fast Ethernet" }, 205 { 0, 0, 0, NULL } 206 }; 207 208 static device_method_t jme_methods[] = { 209 /* Device interface. */ 210 DEVMETHOD(device_probe, jme_probe), 211 DEVMETHOD(device_attach, jme_attach), 212 DEVMETHOD(device_detach, jme_detach), 213 DEVMETHOD(device_shutdown, jme_shutdown), 214 DEVMETHOD(device_suspend, jme_suspend), 215 DEVMETHOD(device_resume, jme_resume), 216 217 /* Bus interface. */ 218 DEVMETHOD(bus_print_child, bus_generic_print_child), 219 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 220 221 /* MII interface. */ 222 DEVMETHOD(miibus_readreg, jme_miibus_readreg), 223 DEVMETHOD(miibus_writereg, jme_miibus_writereg), 224 DEVMETHOD(miibus_statchg, jme_miibus_statchg), 225 226 { NULL, NULL } 227 }; 228 229 static driver_t jme_driver = { 230 "jme", 231 jme_methods, 232 sizeof(struct jme_softc) 233 }; 234 235 static devclass_t jme_devclass; 236 237 DECLARE_DUMMY_MODULE(if_jme); 238 MODULE_DEPEND(if_jme, miibus, 1, 1, 1); 239 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL); 240 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL); 241 242 static const struct { 243 uint32_t jme_coal; 244 uint32_t jme_comp; 245 uint32_t jme_empty; 246 } jme_rx_status[JME_NRXRING_MAX] = { 247 { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP, 248 INTR_RXQ0_DESC_EMPTY }, 249 { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP, 250 INTR_RXQ1_DESC_EMPTY }, 251 { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP, 252 INTR_RXQ2_DESC_EMPTY }, 253 { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP, 254 INTR_RXQ3_DESC_EMPTY } 255 }; 256 257 static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF; 258 static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF; 259 static int jme_rx_ring_count = 0; 260 static int jme_msi_enable = 1; 261 static int jme_msix_enable = 1; 262 263 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count); 264 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count); 265 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count); 266 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable); 267 TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable); 268 269 static __inline void 270 jme_setup_rxdesc(struct jme_rxdesc *rxd) 271 { 272 struct jme_desc *desc; 273 274 desc = rxd->rx_desc; 275 desc->buflen = htole32(MCLBYTES); 276 desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr)); 277 desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr)); 278 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 279 } 280 281 /* 282 * Read a PHY register on the MII of the JMC250. 283 */ 284 static int 285 jme_miibus_readreg(device_t dev, int phy, int reg) 286 { 287 struct jme_softc *sc = device_get_softc(dev); 288 uint32_t val; 289 int i; 290 291 /* For FPGA version, PHY address 0 should be ignored. */ 292 if (sc->jme_caps & JME_CAP_FPGA) { 293 if (phy == 0) 294 return (0); 295 } else { 296 if (sc->jme_phyaddr != phy) 297 return (0); 298 } 299 300 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE | 301 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 302 303 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 304 DELAY(1); 305 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 306 break; 307 } 308 if (i == 0) { 309 device_printf(sc->jme_dev, "phy read timeout: " 310 "phy %d, reg %d\n", phy, reg); 311 return (0); 312 } 313 314 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT); 315 } 316 317 /* 318 * Write a PHY register on the MII of the JMC250. 319 */ 320 static int 321 jme_miibus_writereg(device_t dev, int phy, int reg, int val) 322 { 323 struct jme_softc *sc = device_get_softc(dev); 324 int i; 325 326 /* For FPGA version, PHY address 0 should be ignored. */ 327 if (sc->jme_caps & JME_CAP_FPGA) { 328 if (phy == 0) 329 return (0); 330 } else { 331 if (sc->jme_phyaddr != phy) 332 return (0); 333 } 334 335 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE | 336 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 337 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 338 339 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 340 DELAY(1); 341 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 342 break; 343 } 344 if (i == 0) { 345 device_printf(sc->jme_dev, "phy write timeout: " 346 "phy %d, reg %d\n", phy, reg); 347 } 348 349 return (0); 350 } 351 352 /* 353 * Callback from MII layer when media changes. 354 */ 355 static void 356 jme_miibus_statchg(device_t dev) 357 { 358 struct jme_softc *sc = device_get_softc(dev); 359 struct ifnet *ifp = &sc->arpcom.ac_if; 360 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 361 struct mii_data *mii; 362 struct jme_txdesc *txd; 363 bus_addr_t paddr; 364 int i, r; 365 366 if (sc->jme_in_tick) 367 jme_serialize_skipmain(sc); 368 ASSERT_IFNET_SERIALIZED_ALL(ifp); 369 370 if ((ifp->if_flags & IFF_RUNNING) == 0) 371 goto done; 372 373 mii = device_get_softc(sc->jme_miibus); 374 375 sc->jme_has_link = FALSE; 376 if ((mii->mii_media_status & IFM_AVALID) != 0) { 377 switch (IFM_SUBTYPE(mii->mii_media_active)) { 378 case IFM_10_T: 379 case IFM_100_TX: 380 sc->jme_has_link = TRUE; 381 break; 382 case IFM_1000_T: 383 if (sc->jme_caps & JME_CAP_FASTETH) 384 break; 385 sc->jme_has_link = TRUE; 386 break; 387 default: 388 break; 389 } 390 } 391 392 /* 393 * Disabling Rx/Tx MACs have a side-effect of resetting 394 * JME_TXNDA/JME_RXNDA register to the first address of 395 * Tx/Rx descriptor address. So driver should reset its 396 * internal procucer/consumer pointer and reclaim any 397 * allocated resources. Note, just saving the value of 398 * JME_TXNDA and JME_RXNDA registers before stopping MAC 399 * and restoring JME_TXNDA/JME_RXNDA register is not 400 * sufficient to make sure correct MAC state because 401 * stopping MAC operation can take a while and hardware 402 * might have updated JME_TXNDA/JME_RXNDA registers 403 * during the stop operation. 404 */ 405 406 /* Disable interrupts */ 407 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 408 409 /* Stop driver */ 410 ifp->if_flags &= ~IFF_RUNNING; 411 ifq_clr_oactive(&ifp->if_snd); 412 ifp->if_timer = 0; 413 callout_stop(&sc->jme_tick_ch); 414 415 /* Stop receiver/transmitter. */ 416 jme_stop_rx(sc); 417 jme_stop_tx(sc); 418 419 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 420 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 421 422 jme_rxeof(rdata, -1, -1); 423 if (rdata->jme_rxhead != NULL) 424 m_freem(rdata->jme_rxhead); 425 JME_RXCHAIN_RESET(rdata); 426 427 /* 428 * Reuse configured Rx descriptors and reset 429 * procuder/consumer index. 430 */ 431 rdata->jme_rx_cons = 0; 432 } 433 if (JME_ENABLE_HWRSS(sc)) 434 jme_enable_rss(sc); 435 else 436 jme_disable_rss(sc); 437 438 jme_txeof(tdata); 439 if (tdata->jme_tx_cnt != 0) { 440 /* Remove queued packets for transmit. */ 441 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 442 txd = &tdata->jme_txdesc[i]; 443 if (txd->tx_m != NULL) { 444 bus_dmamap_unload( tdata->jme_tx_tag, 445 txd->tx_dmamap); 446 m_freem(txd->tx_m); 447 txd->tx_m = NULL; 448 txd->tx_ndesc = 0; 449 IFNET_STAT_INC(ifp, oerrors, 1); 450 } 451 } 452 } 453 jme_init_tx_ring(tdata); 454 455 /* Initialize shadow status block. */ 456 jme_init_ssb(sc); 457 458 /* Program MAC with resolved speed/duplex/flow-control. */ 459 if (sc->jme_has_link) { 460 jme_mac_config(sc); 461 462 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 463 464 /* Set Tx ring address to the hardware. */ 465 paddr = tdata->jme_tx_ring_paddr; 466 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 467 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 468 469 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 470 CSR_WRITE_4(sc, JME_RXCSR, 471 sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r)); 472 473 /* Set Rx ring address to the hardware. */ 474 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr; 475 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 476 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 477 } 478 479 /* Restart receiver/transmitter. */ 480 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB | 481 RXCSR_RXQ_START); 482 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB); 483 } 484 485 ifp->if_flags |= IFF_RUNNING; 486 ifq_clr_oactive(&ifp->if_snd); 487 callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc, 488 JME_TICK_CPUID); 489 490 #ifdef IFPOLL_ENABLE 491 if (!(ifp->if_flags & IFF_NPOLLING)) 492 #endif 493 /* Reenable interrupts. */ 494 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 495 496 done: 497 if (sc->jme_in_tick) 498 jme_deserialize_skipmain(sc); 499 } 500 501 /* 502 * Get the current interface media status. 503 */ 504 static void 505 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 506 { 507 struct jme_softc *sc = ifp->if_softc; 508 struct mii_data *mii = device_get_softc(sc->jme_miibus); 509 510 ASSERT_IFNET_SERIALIZED_ALL(ifp); 511 512 mii_pollstat(mii); 513 ifmr->ifm_status = mii->mii_media_status; 514 ifmr->ifm_active = mii->mii_media_active; 515 } 516 517 /* 518 * Set hardware to newly-selected media. 519 */ 520 static int 521 jme_mediachange(struct ifnet *ifp) 522 { 523 struct jme_softc *sc = ifp->if_softc; 524 struct mii_data *mii = device_get_softc(sc->jme_miibus); 525 int error; 526 527 ASSERT_IFNET_SERIALIZED_ALL(ifp); 528 529 if (mii->mii_instance != 0) { 530 struct mii_softc *miisc; 531 532 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 533 mii_phy_reset(miisc); 534 } 535 error = mii_mediachg(mii); 536 537 return (error); 538 } 539 540 static int 541 jme_probe(device_t dev) 542 { 543 const struct jme_dev *sp; 544 uint16_t vid, did; 545 546 vid = pci_get_vendor(dev); 547 did = pci_get_device(dev); 548 for (sp = jme_devs; sp->jme_name != NULL; ++sp) { 549 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) { 550 struct jme_softc *sc = device_get_softc(dev); 551 552 sc->jme_caps = sp->jme_caps; 553 device_set_desc(dev, sp->jme_name); 554 return (0); 555 } 556 } 557 return (ENXIO); 558 } 559 560 static int 561 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val) 562 { 563 uint32_t reg; 564 int i; 565 566 *val = 0; 567 for (i = JME_TIMEOUT; i > 0; i--) { 568 reg = CSR_READ_4(sc, JME_SMBCSR); 569 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE) 570 break; 571 DELAY(1); 572 } 573 574 if (i == 0) { 575 device_printf(sc->jme_dev, "EEPROM idle timeout!\n"); 576 return (ETIMEDOUT); 577 } 578 579 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK; 580 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER); 581 for (i = JME_TIMEOUT; i > 0; i--) { 582 DELAY(1); 583 reg = CSR_READ_4(sc, JME_SMBINTF); 584 if ((reg & SMBINTF_CMD_TRIGGER) == 0) 585 break; 586 } 587 588 if (i == 0) { 589 device_printf(sc->jme_dev, "EEPROM read timeout!\n"); 590 return (ETIMEDOUT); 591 } 592 593 reg = CSR_READ_4(sc, JME_SMBINTF); 594 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT; 595 596 return (0); 597 } 598 599 static int 600 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 601 { 602 uint8_t fup, reg, val; 603 uint32_t offset; 604 int match; 605 606 offset = 0; 607 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 608 fup != JME_EEPROM_SIG0) 609 return (ENOENT); 610 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 611 fup != JME_EEPROM_SIG1) 612 return (ENOENT); 613 match = 0; 614 do { 615 if (jme_eeprom_read_byte(sc, offset, &fup) != 0) 616 break; 617 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) == 618 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) { 619 if (jme_eeprom_read_byte(sc, offset + 1, ®) != 0) 620 break; 621 if (reg >= JME_PAR0 && 622 reg < JME_PAR0 + ETHER_ADDR_LEN) { 623 if (jme_eeprom_read_byte(sc, offset + 2, 624 &val) != 0) 625 break; 626 eaddr[reg - JME_PAR0] = val; 627 match++; 628 } 629 } 630 /* Check for the end of EEPROM descriptor. */ 631 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END) 632 break; 633 /* Try next eeprom descriptor. */ 634 offset += JME_EEPROM_DESC_BYTES; 635 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END); 636 637 if (match == ETHER_ADDR_LEN) 638 return (0); 639 640 return (ENOENT); 641 } 642 643 static void 644 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 645 { 646 uint32_t par0, par1; 647 648 /* Read station address. */ 649 par0 = CSR_READ_4(sc, JME_PAR0); 650 par1 = CSR_READ_4(sc, JME_PAR1); 651 par1 &= 0xFFFF; 652 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) { 653 device_printf(sc->jme_dev, 654 "generating fake ethernet address.\n"); 655 par0 = karc4random(); 656 /* Set OUI to JMicron. */ 657 eaddr[0] = 0x00; 658 eaddr[1] = 0x1B; 659 eaddr[2] = 0x8C; 660 eaddr[3] = (par0 >> 16) & 0xff; 661 eaddr[4] = (par0 >> 8) & 0xff; 662 eaddr[5] = par0 & 0xff; 663 } else { 664 eaddr[0] = (par0 >> 0) & 0xFF; 665 eaddr[1] = (par0 >> 8) & 0xFF; 666 eaddr[2] = (par0 >> 16) & 0xFF; 667 eaddr[3] = (par0 >> 24) & 0xFF; 668 eaddr[4] = (par1 >> 0) & 0xFF; 669 eaddr[5] = (par1 >> 8) & 0xFF; 670 } 671 } 672 673 static int 674 jme_attach(device_t dev) 675 { 676 struct jme_softc *sc = device_get_softc(dev); 677 struct ifnet *ifp = &sc->arpcom.ac_if; 678 uint32_t reg; 679 uint16_t did; 680 uint8_t pcie_ptr, rev; 681 int error = 0, i, j, rx_desc_cnt, coal_max; 682 uint8_t eaddr[ETHER_ADDR_LEN]; 683 #ifdef IFPOLL_ENABLE 684 int offset, offset_def; 685 #endif 686 687 /* 688 * Initialize serializers 689 */ 690 lwkt_serialize_init(&sc->jme_serialize); 691 lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize); 692 for (i = 0; i < JME_NRXRING_MAX; ++i) { 693 lwkt_serialize_init( 694 &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize); 695 } 696 697 /* 698 * Get # of RX ring descriptors 699 */ 700 rx_desc_cnt = device_getenv_int(dev, "rx_desc_count", 701 jme_rx_desc_count); 702 rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN); 703 if (rx_desc_cnt > JME_NDESC_MAX) 704 rx_desc_cnt = JME_NDESC_MAX; 705 706 /* 707 * Get # of TX ring descriptors 708 */ 709 sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = 710 device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count); 711 sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = 712 roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, JME_NDESC_ALIGN); 713 if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX) 714 sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX; 715 716 /* 717 * Get # of RX rings 718 */ 719 sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count", 720 jme_rx_ring_count); 721 sc->jme_cdata.jme_rx_ring_cnt = 722 if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX); 723 724 /* 725 * Initialize serializer array 726 */ 727 i = 0; 728 729 KKASSERT(i < JME_NSERIALIZE); 730 sc->jme_serialize_arr[i++] = &sc->jme_serialize; 731 732 KKASSERT(i < JME_NSERIALIZE); 733 sc->jme_serialize_arr[i++] = 734 &sc->jme_cdata.jme_tx_data.jme_tx_serialize; 735 736 for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) { 737 KKASSERT(i < JME_NSERIALIZE); 738 sc->jme_serialize_arr[i++] = 739 &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize; 740 } 741 742 KKASSERT(i <= JME_NSERIALIZE); 743 sc->jme_serialize_cnt = i; 744 745 /* 746 * Setup TX ring specific data 747 */ 748 sc->jme_cdata.jme_tx_data.jme_sc = sc; 749 750 /* 751 * Setup RX rings specific data 752 */ 753 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 754 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i]; 755 756 rdata->jme_sc = sc; 757 rdata->jme_rx_coal = jme_rx_status[i].jme_coal; 758 rdata->jme_rx_comp = jme_rx_status[i].jme_comp; 759 rdata->jme_rx_empty = jme_rx_status[i].jme_empty; 760 rdata->jme_rx_idx = i; 761 rdata->jme_rx_desc_cnt = rx_desc_cnt; 762 } 763 764 sc->jme_dev = dev; 765 sc->jme_lowaddr = BUS_SPACE_MAXADDR; 766 767 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 768 769 callout_init(&sc->jme_tick_ch); 770 771 #ifndef BURN_BRIDGES 772 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 773 uint32_t irq, mem; 774 775 irq = pci_read_config(dev, PCIR_INTLINE, 4); 776 mem = pci_read_config(dev, JME_PCIR_BAR, 4); 777 778 device_printf(dev, "chip is in D%d power mode " 779 "-- setting to D0\n", pci_get_powerstate(dev)); 780 781 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 782 783 pci_write_config(dev, PCIR_INTLINE, irq, 4); 784 pci_write_config(dev, JME_PCIR_BAR, mem, 4); 785 } 786 #endif /* !BURN_BRIDGE */ 787 788 /* Enable bus mastering */ 789 pci_enable_busmaster(dev); 790 791 /* 792 * Allocate IO memory 793 * 794 * JMC250 supports both memory mapped and I/O register space 795 * access. Because I/O register access should use different 796 * BARs to access registers it's waste of time to use I/O 797 * register spce access. JMC250 uses 16K to map entire memory 798 * space. 799 */ 800 sc->jme_mem_rid = JME_PCIR_BAR; 801 sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 802 &sc->jme_mem_rid, RF_ACTIVE); 803 if (sc->jme_mem_res == NULL) { 804 device_printf(dev, "can't allocate IO memory\n"); 805 return ENXIO; 806 } 807 sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res); 808 sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res); 809 810 /* 811 * Allocate IRQ 812 */ 813 error = jme_intr_alloc(dev); 814 if (error) 815 goto fail; 816 817 /* 818 * Extract revisions 819 */ 820 reg = CSR_READ_4(sc, JME_CHIPMODE); 821 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) != 822 CHIPMODE_NOT_FPGA) { 823 sc->jme_caps |= JME_CAP_FPGA; 824 if (bootverbose) { 825 device_printf(dev, "FPGA revision: 0x%04x\n", 826 (reg & CHIPMODE_FPGA_REV_MASK) >> 827 CHIPMODE_FPGA_REV_SHIFT); 828 } 829 } 830 831 /* NOTE: FM revision is put in the upper 4 bits */ 832 rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4; 833 rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT; 834 if (bootverbose) 835 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev); 836 837 did = pci_get_device(dev); 838 switch (did) { 839 case PCI_PRODUCT_JMICRON_JMC250: 840 if (rev == JME_REV1_A2) 841 sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX; 842 break; 843 844 case PCI_PRODUCT_JMICRON_JMC260: 845 if (rev == JME_REV2) { 846 sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT; 847 sc->jme_phycom0 = 0x608a; 848 } else if (rev == JME_REV2_2) { 849 sc->jme_phycom0 = 0x408a; 850 } 851 break; 852 853 default: 854 panic("unknown device id 0x%04x", did); 855 } 856 if (rev >= JME_REV2) { 857 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC; 858 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 | 859 GHC_TXMAC_CLKSRC_1000; 860 } 861 if (rev >= JME_REV5) 862 sc->jme_caps |= JME_CAP_PHYPWR; 863 if (rev >= JME_REV6 || rev == JME_REV5 || rev == JME_REV5_1 || 864 rev == JME_REV5_3) { 865 sc->jme_phycom0 = 0x008a; 866 sc->jme_phycom1 = 0x4109; 867 } else if (rev == JME_REV3_1 || rev == JME_REV3_2) { 868 sc->jme_phycom0 = 0xe088; 869 } 870 871 if (rev >= JME_REV2) { 872 reg = pci_read_config(dev, JME_PCI_SSCTRL, 4); 873 if ((reg & SSCTRL_PHYMASK) == SSCTRL_PHYEA) { 874 sc->jme_phycom0 = 0; 875 sc->jme_phycom1 = 0; 876 } 877 } 878 879 /* Reset the ethernet controller. */ 880 jme_reset(sc); 881 882 /* Map MSI/MSI-X vectors */ 883 jme_set_msinum(sc); 884 885 /* Get station address. */ 886 reg = CSR_READ_4(sc, JME_SMBCSR); 887 if (reg & SMBCSR_EEPROM_PRESENT) 888 error = jme_eeprom_macaddr(sc, eaddr); 889 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) { 890 if (error != 0 && (bootverbose)) { 891 device_printf(dev, "ethernet hardware address " 892 "not found in EEPROM.\n"); 893 } 894 jme_reg_macaddr(sc, eaddr); 895 } 896 897 /* 898 * Save PHY address. 899 * Integrated JR0211 has fixed PHY address whereas FPGA version 900 * requires PHY probing to get correct PHY address. 901 */ 902 if ((sc->jme_caps & JME_CAP_FPGA) == 0) { 903 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) & 904 GPREG0_PHY_ADDR_MASK; 905 if (bootverbose) { 906 device_printf(dev, "PHY is at address %d.\n", 907 sc->jme_phyaddr); 908 } 909 } else { 910 sc->jme_phyaddr = 0; 911 } 912 913 /* Set max allowable DMA size. */ 914 pcie_ptr = pci_get_pciecap_ptr(dev); 915 if (pcie_ptr != 0) { 916 uint16_t ctrl; 917 918 sc->jme_caps |= JME_CAP_PCIE; 919 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2); 920 if (bootverbose) { 921 device_printf(dev, "Read request size : %d bytes.\n", 922 128 << ((ctrl >> 12) & 0x07)); 923 device_printf(dev, "TLP payload size : %d bytes.\n", 924 128 << ((ctrl >> 5) & 0x07)); 925 } 926 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) { 927 case PCIEM_DEVCTL_MAX_READRQ_128: 928 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128; 929 break; 930 case PCIEM_DEVCTL_MAX_READRQ_256: 931 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256; 932 break; 933 default: 934 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; 935 break; 936 } 937 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; 938 } else { 939 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; 940 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; 941 } 942 943 #ifdef notyet 944 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) 945 sc->jme_caps |= JME_CAP_PMCAP; 946 #endif 947 948 #ifdef IFPOLL_ENABLE 949 /* 950 * NPOLLING RX CPU offset 951 */ 952 if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) { 953 offset = 0; 954 } else { 955 offset_def = (sc->jme_cdata.jme_rx_ring_cnt * 956 device_get_unit(dev)) % ncpus2; 957 offset = device_getenv_int(dev, "npoll.rxoff", offset_def); 958 if (offset >= ncpus2 || 959 offset % sc->jme_cdata.jme_rx_ring_cnt != 0) { 960 device_printf(dev, "invalid npoll.rxoff %d, use %d\n", 961 offset, offset_def); 962 offset = offset_def; 963 } 964 } 965 sc->jme_npoll_rxoff = offset; 966 967 /* 968 * NPOLLING TX CPU offset 969 */ 970 offset_def = sc->jme_npoll_rxoff; 971 offset = device_getenv_int(dev, "npoll.txoff", offset_def); 972 if (offset >= ncpus2) { 973 device_printf(dev, "invalid npoll.txoff %d, use %d\n", 974 offset, offset_def); 975 offset = offset_def; 976 } 977 sc->jme_npoll_txoff = offset; 978 #endif 979 980 /* 981 * Set default coalesce valves 982 */ 983 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; 984 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; 985 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; 986 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; 987 988 /* 989 * Adjust coalesce valves, in case that the number of TX/RX 990 * descs are set to small values by users. 991 * 992 * NOTE: coal_max will not be zero, since number of descs 993 * must aligned by JME_NDESC_ALIGN (16 currently) 994 */ 995 coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2; 996 if (coal_max < sc->jme_tx_coal_pkt) 997 sc->jme_tx_coal_pkt = coal_max; 998 999 coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2; 1000 if (coal_max < sc->jme_rx_coal_pkt) 1001 sc->jme_rx_coal_pkt = coal_max; 1002 1003 sc->jme_cdata.jme_tx_data.jme_tx_wreg = JME_TXWREG_NSEGS; 1004 1005 /* 1006 * Create sysctl tree 1007 */ 1008 jme_sysctl_node(sc); 1009 1010 /* Allocate DMA stuffs */ 1011 error = jme_dma_alloc(sc); 1012 if (error) 1013 goto fail; 1014 1015 ifp->if_softc = sc; 1016 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1017 ifp->if_init = jme_init; 1018 ifp->if_ioctl = jme_ioctl; 1019 ifp->if_start = jme_start; 1020 #ifdef IFPOLL_ENABLE 1021 ifp->if_npoll = jme_npoll; 1022 #endif 1023 ifp->if_watchdog = jme_watchdog; 1024 ifp->if_serialize = jme_serialize; 1025 ifp->if_deserialize = jme_deserialize; 1026 ifp->if_tryserialize = jme_tryserialize; 1027 #ifdef INVARIANTS 1028 ifp->if_serialize_assert = jme_serialize_assert; 1029 #endif 1030 ifp->if_nmbclusters = sc->jme_cdata.jme_rx_ring_cnt * 1031 sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt; 1032 ifq_set_maxlen(&ifp->if_snd, 1033 sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD); 1034 ifq_set_ready(&ifp->if_snd); 1035 1036 /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */ 1037 ifp->if_capabilities = IFCAP_HWCSUM | 1038 IFCAP_TSO | 1039 IFCAP_VLAN_MTU | 1040 IFCAP_VLAN_HWTAGGING; 1041 if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN) 1042 ifp->if_capabilities |= IFCAP_RSS; 1043 ifp->if_capenable = ifp->if_capabilities; 1044 1045 /* 1046 * Disable TXCSUM by default to improve bulk data 1047 * transmit performance (+20Mbps improvement). 1048 */ 1049 ifp->if_capenable &= ~IFCAP_TXCSUM; 1050 1051 if (ifp->if_capenable & IFCAP_TXCSUM) 1052 ifp->if_hwassist |= JME_CSUM_FEATURES; 1053 ifp->if_hwassist |= CSUM_TSO; 1054 1055 /* Set up MII bus. */ 1056 error = mii_phy_probe(dev, &sc->jme_miibus, 1057 jme_mediachange, jme_mediastatus); 1058 if (error) { 1059 device_printf(dev, "no PHY found!\n"); 1060 goto fail; 1061 } 1062 1063 /* 1064 * Save PHYADDR for FPGA mode PHY. 1065 */ 1066 if (sc->jme_caps & JME_CAP_FPGA) { 1067 struct mii_data *mii = device_get_softc(sc->jme_miibus); 1068 1069 if (mii->mii_instance != 0) { 1070 struct mii_softc *miisc; 1071 1072 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { 1073 if (miisc->mii_phy != 0) { 1074 sc->jme_phyaddr = miisc->mii_phy; 1075 break; 1076 } 1077 } 1078 if (sc->jme_phyaddr != 0) { 1079 device_printf(sc->jme_dev, 1080 "FPGA PHY is at %d\n", sc->jme_phyaddr); 1081 /* vendor magic. */ 1082 jme_miibus_writereg(dev, sc->jme_phyaddr, 1083 JMPHY_CONF, JMPHY_CONF_DEFFIFO); 1084 1085 /* XXX should we clear JME_WA_EXTFIFO */ 1086 } 1087 } 1088 } 1089 1090 ether_ifattach(ifp, eaddr, NULL); 1091 1092 /* Tell the upper layer(s) we support long frames. */ 1093 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1094 1095 /* Setup the TX ring's CPUID */ 1096 ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid); 1097 ifq_set_hw_serialize(&ifp->if_snd, 1098 &sc->jme_cdata.jme_tx_data.jme_tx_serialize); 1099 1100 error = jme_intr_setup(dev); 1101 if (error) { 1102 ether_ifdetach(ifp); 1103 goto fail; 1104 } 1105 1106 return 0; 1107 fail: 1108 jme_detach(dev); 1109 return (error); 1110 } 1111 1112 static int 1113 jme_detach(device_t dev) 1114 { 1115 struct jme_softc *sc = device_get_softc(dev); 1116 1117 if (device_is_attached(dev)) { 1118 struct ifnet *ifp = &sc->arpcom.ac_if; 1119 1120 ifnet_serialize_all(ifp); 1121 jme_stop(sc); 1122 jme_intr_teardown(dev); 1123 ifnet_deserialize_all(ifp); 1124 1125 ether_ifdetach(ifp); 1126 } 1127 1128 if (sc->jme_miibus != NULL) 1129 device_delete_child(dev, sc->jme_miibus); 1130 bus_generic_detach(dev); 1131 1132 jme_intr_free(dev); 1133 1134 if (sc->jme_mem_res != NULL) { 1135 bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid, 1136 sc->jme_mem_res); 1137 } 1138 1139 jme_dma_free(sc); 1140 1141 return (0); 1142 } 1143 1144 static void 1145 jme_sysctl_node(struct jme_softc *sc) 1146 { 1147 struct sysctl_ctx_list *ctx; 1148 struct sysctl_oid *tree; 1149 #ifdef JME_RSS_DEBUG 1150 int r; 1151 #endif 1152 1153 ctx = device_get_sysctl_ctx(sc->jme_dev); 1154 tree = device_get_sysctl_tree(sc->jme_dev); 1155 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1156 "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, 1157 sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout"); 1158 1159 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1160 "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, 1161 sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet"); 1162 1163 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1164 "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, 1165 sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout"); 1166 1167 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1168 "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, 1169 sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet"); 1170 1171 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1172 "rx_desc_count", CTLFLAG_RD, 1173 &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt, 1174 0, "RX desc count"); 1175 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1176 "tx_desc_count", CTLFLAG_RD, 1177 &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, 1178 0, "TX desc count"); 1179 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1180 "rx_ring_count", CTLFLAG_RD, 1181 &sc->jme_cdata.jme_rx_ring_cnt, 1182 0, "RX ring count"); 1183 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1184 "tx_wreg", CTLFLAG_RW, 1185 &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0, 1186 "# of segments before writing to hardware register"); 1187 1188 #ifdef JME_RSS_DEBUG 1189 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1190 "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug, 1191 0, "RSS debug level"); 1192 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 1193 char rx_ring_desc[32]; 1194 1195 ksnprintf(rx_ring_desc, sizeof(rx_ring_desc), 1196 "rx_ring%d_pkt", r); 1197 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1198 rx_ring_desc, CTLFLAG_RW, 1199 &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets"); 1200 1201 ksnprintf(rx_ring_desc, sizeof(rx_ring_desc), 1202 "rx_ring%d_emp", r); 1203 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1204 rx_ring_desc, CTLFLAG_RW, 1205 &sc->jme_cdata.jme_rx_data[r].jme_rx_emp, 1206 "# of time RX ring empty"); 1207 } 1208 #endif 1209 1210 #ifdef IFPOLL_ENABLE 1211 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1212 "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 1213 jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset"); 1214 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1215 "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 1216 jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset"); 1217 #endif 1218 } 1219 1220 static int 1221 jme_dma_alloc(struct jme_softc *sc) 1222 { 1223 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 1224 struct jme_txdesc *txd; 1225 bus_dmamem_t dmem; 1226 int error, i, asize; 1227 1228 asize = __VM_CACHELINE_ALIGN( 1229 tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc)); 1230 tdata->jme_txdesc = kmalloc_cachealign(asize, M_DEVBUF, 1231 M_WAITOK | M_ZERO); 1232 1233 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 1234 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i]; 1235 1236 asize = __VM_CACHELINE_ALIGN( 1237 rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc)); 1238 rdata->jme_rxdesc = kmalloc_cachealign(asize, M_DEVBUF, 1239 M_WAITOK | M_ZERO); 1240 } 1241 1242 /* Create parent ring tag. */ 1243 error = bus_dma_tag_create(NULL,/* parent */ 1244 1, JME_RING_BOUNDARY, /* algnmnt, boundary */ 1245 sc->jme_lowaddr, /* lowaddr */ 1246 BUS_SPACE_MAXADDR, /* highaddr */ 1247 NULL, NULL, /* filter, filterarg */ 1248 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1249 0, /* nsegments */ 1250 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1251 0, /* flags */ 1252 &sc->jme_cdata.jme_ring_tag); 1253 if (error) { 1254 device_printf(sc->jme_dev, 1255 "could not create parent ring DMA tag.\n"); 1256 return error; 1257 } 1258 1259 /* 1260 * Create DMA stuffs for TX ring 1261 */ 1262 asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN); 1263 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag, 1264 JME_TX_RING_ALIGN, 0, 1265 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1266 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1267 if (error) { 1268 device_printf(sc->jme_dev, "could not allocate Tx ring.\n"); 1269 return error; 1270 } 1271 tdata->jme_tx_ring_tag = dmem.dmem_tag; 1272 tdata->jme_tx_ring_map = dmem.dmem_map; 1273 tdata->jme_tx_ring = dmem.dmem_addr; 1274 tdata->jme_tx_ring_paddr = dmem.dmem_busaddr; 1275 1276 /* 1277 * Create DMA stuffs for RX rings 1278 */ 1279 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 1280 error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]); 1281 if (error) 1282 return error; 1283 } 1284 1285 /* Create parent buffer tag. */ 1286 error = bus_dma_tag_create(NULL,/* parent */ 1287 1, 0, /* algnmnt, boundary */ 1288 sc->jme_lowaddr, /* lowaddr */ 1289 BUS_SPACE_MAXADDR, /* highaddr */ 1290 NULL, NULL, /* filter, filterarg */ 1291 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1292 0, /* nsegments */ 1293 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1294 0, /* flags */ 1295 &sc->jme_cdata.jme_buffer_tag); 1296 if (error) { 1297 device_printf(sc->jme_dev, 1298 "could not create parent buffer DMA tag.\n"); 1299 return error; 1300 } 1301 1302 /* 1303 * Create DMA stuffs for shadow status block 1304 */ 1305 asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN); 1306 error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag, 1307 JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1308 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1309 if (error) { 1310 device_printf(sc->jme_dev, 1311 "could not create shadow status block.\n"); 1312 return error; 1313 } 1314 sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag; 1315 sc->jme_cdata.jme_ssb_map = dmem.dmem_map; 1316 sc->jme_cdata.jme_ssb_block = dmem.dmem_addr; 1317 sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr; 1318 1319 /* 1320 * Create DMA stuffs for TX buffers 1321 */ 1322 1323 /* Create tag for Tx buffers. */ 1324 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1325 1, 0, /* algnmnt, boundary */ 1326 BUS_SPACE_MAXADDR, /* lowaddr */ 1327 BUS_SPACE_MAXADDR, /* highaddr */ 1328 NULL, NULL, /* filter, filterarg */ 1329 JME_TSO_MAXSIZE, /* maxsize */ 1330 JME_MAXTXSEGS, /* nsegments */ 1331 JME_MAXSEGSIZE, /* maxsegsize */ 1332 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */ 1333 &tdata->jme_tx_tag); 1334 if (error != 0) { 1335 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n"); 1336 return error; 1337 } 1338 1339 /* Create DMA maps for Tx buffers. */ 1340 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 1341 txd = &tdata->jme_txdesc[i]; 1342 error = bus_dmamap_create(tdata->jme_tx_tag, 1343 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1344 &txd->tx_dmamap); 1345 if (error) { 1346 int j; 1347 1348 device_printf(sc->jme_dev, 1349 "could not create %dth Tx dmamap.\n", i); 1350 1351 for (j = 0; j < i; ++j) { 1352 txd = &tdata->jme_txdesc[j]; 1353 bus_dmamap_destroy(tdata->jme_tx_tag, 1354 txd->tx_dmamap); 1355 } 1356 bus_dma_tag_destroy(tdata->jme_tx_tag); 1357 tdata->jme_tx_tag = NULL; 1358 return error; 1359 } 1360 } 1361 1362 /* 1363 * Create DMA stuffs for RX buffers 1364 */ 1365 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 1366 error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]); 1367 if (error) 1368 return error; 1369 } 1370 return 0; 1371 } 1372 1373 static void 1374 jme_dma_free(struct jme_softc *sc) 1375 { 1376 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 1377 struct jme_txdesc *txd; 1378 struct jme_rxdesc *rxd; 1379 struct jme_rxdata *rdata; 1380 int i, r; 1381 1382 /* Tx ring */ 1383 if (tdata->jme_tx_ring_tag != NULL) { 1384 bus_dmamap_unload(tdata->jme_tx_ring_tag, 1385 tdata->jme_tx_ring_map); 1386 bus_dmamem_free(tdata->jme_tx_ring_tag, 1387 tdata->jme_tx_ring, tdata->jme_tx_ring_map); 1388 bus_dma_tag_destroy(tdata->jme_tx_ring_tag); 1389 tdata->jme_tx_ring_tag = NULL; 1390 } 1391 1392 /* Rx ring */ 1393 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 1394 rdata = &sc->jme_cdata.jme_rx_data[r]; 1395 if (rdata->jme_rx_ring_tag != NULL) { 1396 bus_dmamap_unload(rdata->jme_rx_ring_tag, 1397 rdata->jme_rx_ring_map); 1398 bus_dmamem_free(rdata->jme_rx_ring_tag, 1399 rdata->jme_rx_ring, 1400 rdata->jme_rx_ring_map); 1401 bus_dma_tag_destroy(rdata->jme_rx_ring_tag); 1402 rdata->jme_rx_ring_tag = NULL; 1403 } 1404 } 1405 1406 /* Tx buffers */ 1407 if (tdata->jme_tx_tag != NULL) { 1408 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 1409 txd = &tdata->jme_txdesc[i]; 1410 bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap); 1411 } 1412 bus_dma_tag_destroy(tdata->jme_tx_tag); 1413 tdata->jme_tx_tag = NULL; 1414 } 1415 1416 /* Rx buffers */ 1417 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 1418 rdata = &sc->jme_cdata.jme_rx_data[r]; 1419 if (rdata->jme_rx_tag != NULL) { 1420 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 1421 rxd = &rdata->jme_rxdesc[i]; 1422 bus_dmamap_destroy(rdata->jme_rx_tag, 1423 rxd->rx_dmamap); 1424 } 1425 bus_dmamap_destroy(rdata->jme_rx_tag, 1426 rdata->jme_rx_sparemap); 1427 bus_dma_tag_destroy(rdata->jme_rx_tag); 1428 rdata->jme_rx_tag = NULL; 1429 } 1430 } 1431 1432 /* Shadow status block. */ 1433 if (sc->jme_cdata.jme_ssb_tag != NULL) { 1434 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag, 1435 sc->jme_cdata.jme_ssb_map); 1436 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag, 1437 sc->jme_cdata.jme_ssb_block, 1438 sc->jme_cdata.jme_ssb_map); 1439 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag); 1440 sc->jme_cdata.jme_ssb_tag = NULL; 1441 } 1442 1443 if (sc->jme_cdata.jme_buffer_tag != NULL) { 1444 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag); 1445 sc->jme_cdata.jme_buffer_tag = NULL; 1446 } 1447 if (sc->jme_cdata.jme_ring_tag != NULL) { 1448 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag); 1449 sc->jme_cdata.jme_ring_tag = NULL; 1450 } 1451 1452 if (tdata->jme_txdesc != NULL) { 1453 kfree(tdata->jme_txdesc, M_DEVBUF); 1454 tdata->jme_txdesc = NULL; 1455 } 1456 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 1457 rdata = &sc->jme_cdata.jme_rx_data[r]; 1458 if (rdata->jme_rxdesc != NULL) { 1459 kfree(rdata->jme_rxdesc, M_DEVBUF); 1460 rdata->jme_rxdesc = NULL; 1461 } 1462 } 1463 } 1464 1465 /* 1466 * Make sure the interface is stopped at reboot time. 1467 */ 1468 static int 1469 jme_shutdown(device_t dev) 1470 { 1471 return jme_suspend(dev); 1472 } 1473 1474 #ifdef notyet 1475 /* 1476 * Unlike other ethernet controllers, JMC250 requires 1477 * explicit resetting link speed to 10/100Mbps as gigabit 1478 * link will cunsume more power than 375mA. 1479 * Note, we reset the link speed to 10/100Mbps with 1480 * auto-negotiation but we don't know whether that operation 1481 * would succeed or not as we have no control after powering 1482 * off. If the renegotiation fail WOL may not work. Running 1483 * at 1Gbps draws more power than 375mA at 3.3V which is 1484 * specified in PCI specification and that would result in 1485 * complete shutdowning power to ethernet controller. 1486 * 1487 * TODO 1488 * Save current negotiated media speed/duplex/flow-control 1489 * to softc and restore the same link again after resuming. 1490 * PHY handling such as power down/resetting to 100Mbps 1491 * may be better handled in suspend method in phy driver. 1492 */ 1493 static void 1494 jme_setlinkspeed(struct jme_softc *sc) 1495 { 1496 struct mii_data *mii; 1497 int aneg, i; 1498 1499 JME_LOCK_ASSERT(sc); 1500 1501 mii = device_get_softc(sc->jme_miibus); 1502 mii_pollstat(mii); 1503 aneg = 0; 1504 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1505 switch IFM_SUBTYPE(mii->mii_media_active) { 1506 case IFM_10_T: 1507 case IFM_100_TX: 1508 return; 1509 case IFM_1000_T: 1510 aneg++; 1511 default: 1512 break; 1513 } 1514 } 1515 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0); 1516 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR, 1517 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 1518 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, 1519 BMCR_AUTOEN | BMCR_STARTNEG); 1520 DELAY(1000); 1521 if (aneg != 0) { 1522 /* Poll link state until jme(4) get a 10/100 link. */ 1523 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1524 mii_pollstat(mii); 1525 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1526 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1527 case IFM_10_T: 1528 case IFM_100_TX: 1529 jme_mac_config(sc); 1530 return; 1531 default: 1532 break; 1533 } 1534 } 1535 JME_UNLOCK(sc); 1536 pause("jmelnk", hz); 1537 JME_LOCK(sc); 1538 } 1539 if (i == MII_ANEGTICKS_GIGE) 1540 device_printf(sc->jme_dev, "establishing link failed, " 1541 "WOL may not work!"); 1542 } 1543 /* 1544 * No link, force MAC to have 100Mbps, full-duplex link. 1545 * This is the last resort and may/may not work. 1546 */ 1547 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1548 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1549 jme_mac_config(sc); 1550 } 1551 1552 static void 1553 jme_setwol(struct jme_softc *sc) 1554 { 1555 struct ifnet *ifp = &sc->arpcom.ac_if; 1556 uint32_t gpr, pmcs; 1557 uint16_t pmstat; 1558 int pmc; 1559 1560 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1561 /* No PME capability, PHY power down. */ 1562 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1563 MII_BMCR, BMCR_PDOWN); 1564 return; 1565 } 1566 1567 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB; 1568 pmcs = CSR_READ_4(sc, JME_PMCS); 1569 pmcs &= ~PMCS_WOL_ENB_MASK; 1570 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 1571 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB; 1572 /* Enable PME message. */ 1573 gpr |= GPREG0_PME_ENB; 1574 /* For gigabit controllers, reset link speed to 10/100. */ 1575 if ((sc->jme_caps & JME_CAP_FASTETH) == 0) 1576 jme_setlinkspeed(sc); 1577 } 1578 1579 CSR_WRITE_4(sc, JME_PMCS, pmcs); 1580 CSR_WRITE_4(sc, JME_GPREG0, gpr); 1581 1582 /* Request PME. */ 1583 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2); 1584 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1585 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1586 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1587 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1588 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1589 /* No WOL, PHY power down. */ 1590 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1591 MII_BMCR, BMCR_PDOWN); 1592 } 1593 } 1594 #endif 1595 1596 static int 1597 jme_suspend(device_t dev) 1598 { 1599 struct jme_softc *sc = device_get_softc(dev); 1600 struct ifnet *ifp = &sc->arpcom.ac_if; 1601 1602 ifnet_serialize_all(ifp); 1603 jme_stop(sc); 1604 #ifdef notyet 1605 jme_setwol(sc); 1606 #endif 1607 ifnet_deserialize_all(ifp); 1608 1609 return (0); 1610 } 1611 1612 static int 1613 jme_resume(device_t dev) 1614 { 1615 struct jme_softc *sc = device_get_softc(dev); 1616 struct ifnet *ifp = &sc->arpcom.ac_if; 1617 #ifdef notyet 1618 int pmc; 1619 #endif 1620 1621 ifnet_serialize_all(ifp); 1622 1623 #ifdef notyet 1624 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1625 uint16_t pmstat; 1626 1627 pmstat = pci_read_config(sc->jme_dev, 1628 pmc + PCIR_POWER_STATUS, 2); 1629 /* Disable PME clear PME status. */ 1630 pmstat &= ~PCIM_PSTAT_PMEENABLE; 1631 pci_write_config(sc->jme_dev, 1632 pmc + PCIR_POWER_STATUS, pmstat, 2); 1633 } 1634 #endif 1635 1636 if (ifp->if_flags & IFF_UP) 1637 jme_init(sc); 1638 1639 ifnet_deserialize_all(ifp); 1640 1641 return (0); 1642 } 1643 1644 static __inline int 1645 jme_tso_pullup(struct mbuf **mp) 1646 { 1647 int hoff, iphlen, thoff; 1648 struct mbuf *m; 1649 1650 m = *mp; 1651 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 1652 1653 iphlen = m->m_pkthdr.csum_iphlen; 1654 thoff = m->m_pkthdr.csum_thlen; 1655 hoff = m->m_pkthdr.csum_lhlen; 1656 1657 KASSERT(iphlen > 0, ("invalid ip hlen")); 1658 KASSERT(thoff > 0, ("invalid tcp hlen")); 1659 KASSERT(hoff > 0, ("invalid ether hlen")); 1660 1661 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 1662 m = m_pullup(m, hoff + iphlen + thoff); 1663 if (m == NULL) { 1664 *mp = NULL; 1665 return ENOBUFS; 1666 } 1667 *mp = m; 1668 } 1669 return 0; 1670 } 1671 1672 static int 1673 jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used) 1674 { 1675 struct jme_txdesc *txd; 1676 struct jme_desc *desc; 1677 struct mbuf *m; 1678 bus_dma_segment_t txsegs[JME_MAXTXSEGS]; 1679 int maxsegs, nsegs; 1680 int error, i, prod, symbol_desc; 1681 uint32_t cflags, flag64, mss; 1682 1683 M_ASSERTPKTHDR((*m_head)); 1684 1685 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) { 1686 /* XXX Is this necessary? */ 1687 error = jme_tso_pullup(m_head); 1688 if (error) 1689 return error; 1690 } 1691 1692 prod = tdata->jme_tx_prod; 1693 txd = &tdata->jme_txdesc[prod]; 1694 1695 if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) 1696 symbol_desc = 1; 1697 else 1698 symbol_desc = 0; 1699 1700 maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) - 1701 (JME_TXD_RSVD + symbol_desc); 1702 if (maxsegs > JME_MAXTXSEGS) 1703 maxsegs = JME_MAXTXSEGS; 1704 KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc), 1705 ("not enough segments %d", maxsegs)); 1706 1707 error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag, 1708 txd->tx_dmamap, m_head, 1709 txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1710 if (error) 1711 goto fail; 1712 *segs_used += nsegs; 1713 1714 bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap, 1715 BUS_DMASYNC_PREWRITE); 1716 1717 m = *m_head; 1718 cflags = 0; 1719 mss = 0; 1720 1721 /* Configure checksum offload. */ 1722 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1723 mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT; 1724 cflags |= JME_TD_TSO; 1725 } else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) { 1726 if (m->m_pkthdr.csum_flags & CSUM_IP) 1727 cflags |= JME_TD_IPCSUM; 1728 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1729 cflags |= JME_TD_TCPCSUM; 1730 if (m->m_pkthdr.csum_flags & CSUM_UDP) 1731 cflags |= JME_TD_UDPCSUM; 1732 } 1733 1734 /* Configure VLAN. */ 1735 if (m->m_flags & M_VLANTAG) { 1736 cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK); 1737 cflags |= JME_TD_VLAN_TAG; 1738 } 1739 1740 desc = &tdata->jme_tx_ring[prod]; 1741 desc->flags = htole32(cflags); 1742 desc->addr_hi = htole32(m->m_pkthdr.len); 1743 if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) { 1744 /* 1745 * Use 64bits TX desc chain format. 1746 * 1747 * The first TX desc of the chain, which is setup here, 1748 * is just a symbol TX desc carrying no payload. 1749 */ 1750 flag64 = JME_TD_64BIT; 1751 desc->buflen = htole32(mss); 1752 desc->addr_lo = 0; 1753 1754 *segs_used += 1; 1755 1756 /* No effective TX desc is consumed */ 1757 i = 0; 1758 } else { 1759 /* 1760 * Use 32bits TX desc chain format. 1761 * 1762 * The first TX desc of the chain, which is setup here, 1763 * is an effective TX desc carrying the first segment of 1764 * the mbuf chain. 1765 */ 1766 flag64 = 0; 1767 desc->buflen = htole32(mss | txsegs[0].ds_len); 1768 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr)); 1769 1770 /* One effective TX desc is consumed */ 1771 i = 1; 1772 } 1773 tdata->jme_tx_cnt++; 1774 KKASSERT(tdata->jme_tx_cnt - i < tdata->jme_tx_desc_cnt - JME_TXD_RSVD); 1775 JME_DESC_INC(prod, tdata->jme_tx_desc_cnt); 1776 1777 txd->tx_ndesc = 1 - i; 1778 for (; i < nsegs; i++) { 1779 desc = &tdata->jme_tx_ring[prod]; 1780 desc->buflen = htole32(txsegs[i].ds_len); 1781 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr)); 1782 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr)); 1783 desc->flags = htole32(JME_TD_OWN | flag64); 1784 1785 tdata->jme_tx_cnt++; 1786 KKASSERT(tdata->jme_tx_cnt <= 1787 tdata->jme_tx_desc_cnt - JME_TXD_RSVD); 1788 JME_DESC_INC(prod, tdata->jme_tx_desc_cnt); 1789 } 1790 1791 /* Update producer index. */ 1792 tdata->jme_tx_prod = prod; 1793 /* 1794 * Finally request interrupt and give the first descriptor 1795 * owenership to hardware. 1796 */ 1797 desc = txd->tx_desc; 1798 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR); 1799 1800 txd->tx_m = m; 1801 txd->tx_ndesc += nsegs; 1802 1803 return 0; 1804 fail: 1805 m_freem(*m_head); 1806 *m_head = NULL; 1807 return error; 1808 } 1809 1810 static void 1811 jme_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1812 { 1813 struct jme_softc *sc = ifp->if_softc; 1814 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 1815 struct mbuf *m_head; 1816 int enq = 0; 1817 1818 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1819 ASSERT_SERIALIZED(&tdata->jme_tx_serialize); 1820 1821 if (!sc->jme_has_link) { 1822 ifq_purge(&ifp->if_snd); 1823 return; 1824 } 1825 1826 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 1827 return; 1828 1829 if (tdata->jme_tx_cnt >= JME_TX_DESC_HIWAT(tdata)) 1830 jme_txeof(tdata); 1831 1832 while (!ifq_is_empty(&ifp->if_snd)) { 1833 /* 1834 * Check number of available TX descs, always 1835 * leave JME_TXD_RSVD free TX descs. 1836 */ 1837 if (tdata->jme_tx_cnt + JME_TXD_SPARE > 1838 tdata->jme_tx_desc_cnt - JME_TXD_RSVD) { 1839 ifq_set_oactive(&ifp->if_snd); 1840 break; 1841 } 1842 1843 m_head = ifq_dequeue(&ifp->if_snd); 1844 if (m_head == NULL) 1845 break; 1846 1847 /* 1848 * Pack the data into the transmit ring. If we 1849 * don't have room, set the OACTIVE flag and wait 1850 * for the NIC to drain the ring. 1851 */ 1852 if (jme_encap(tdata, &m_head, &enq)) { 1853 KKASSERT(m_head == NULL); 1854 IFNET_STAT_INC(ifp, oerrors, 1); 1855 ifq_set_oactive(&ifp->if_snd); 1856 break; 1857 } 1858 1859 if (enq >= tdata->jme_tx_wreg) { 1860 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | 1861 TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0)); 1862 enq = 0; 1863 } 1864 1865 /* 1866 * If there's a BPF listener, bounce a copy of this frame 1867 * to him. 1868 */ 1869 ETHER_BPF_MTAP(ifp, m_head); 1870 1871 /* Set a timeout in case the chip goes out to lunch. */ 1872 ifp->if_timer = JME_TX_TIMEOUT; 1873 } 1874 1875 if (enq > 0) { 1876 /* 1877 * Reading TXCSR takes very long time under heavy load 1878 * so cache TXCSR value and writes the ORed value with 1879 * the kick command to the TXCSR. This saves one register 1880 * access cycle. 1881 */ 1882 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB | 1883 TXCSR_TXQ_N_START(TXCSR_TXQ0)); 1884 } 1885 } 1886 1887 static void 1888 jme_watchdog(struct ifnet *ifp) 1889 { 1890 struct jme_softc *sc = ifp->if_softc; 1891 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 1892 1893 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1894 1895 if (!sc->jme_has_link) { 1896 if_printf(ifp, "watchdog timeout (missed link)\n"); 1897 IFNET_STAT_INC(ifp, oerrors, 1); 1898 jme_init(sc); 1899 return; 1900 } 1901 1902 jme_txeof(tdata); 1903 if (tdata->jme_tx_cnt == 0) { 1904 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 1905 "-- recovering\n"); 1906 if (!ifq_is_empty(&ifp->if_snd)) 1907 if_devstart(ifp); 1908 return; 1909 } 1910 1911 if_printf(ifp, "watchdog timeout\n"); 1912 IFNET_STAT_INC(ifp, oerrors, 1); 1913 jme_init(sc); 1914 if (!ifq_is_empty(&ifp->if_snd)) 1915 if_devstart(ifp); 1916 } 1917 1918 static int 1919 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 1920 { 1921 struct jme_softc *sc = ifp->if_softc; 1922 struct mii_data *mii = device_get_softc(sc->jme_miibus); 1923 struct ifreq *ifr = (struct ifreq *)data; 1924 int error = 0, mask; 1925 1926 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1927 1928 switch (cmd) { 1929 case SIOCSIFMTU: 1930 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU || 1931 (!(sc->jme_caps & JME_CAP_JUMBO) && 1932 ifr->ifr_mtu > JME_MAX_MTU)) { 1933 error = EINVAL; 1934 break; 1935 } 1936 1937 if (ifp->if_mtu != ifr->ifr_mtu) { 1938 /* 1939 * No special configuration is required when interface 1940 * MTU is changed but availability of Tx checksum 1941 * offload should be chcked against new MTU size as 1942 * FIFO size is just 2K. 1943 */ 1944 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) { 1945 ifp->if_capenable &= 1946 ~(IFCAP_TXCSUM | IFCAP_TSO); 1947 ifp->if_hwassist &= 1948 ~(JME_CSUM_FEATURES | CSUM_TSO); 1949 } 1950 ifp->if_mtu = ifr->ifr_mtu; 1951 if (ifp->if_flags & IFF_RUNNING) 1952 jme_init(sc); 1953 } 1954 break; 1955 1956 case SIOCSIFFLAGS: 1957 if (ifp->if_flags & IFF_UP) { 1958 if (ifp->if_flags & IFF_RUNNING) { 1959 if ((ifp->if_flags ^ sc->jme_if_flags) & 1960 (IFF_PROMISC | IFF_ALLMULTI)) 1961 jme_set_filter(sc); 1962 } else { 1963 jme_init(sc); 1964 } 1965 } else { 1966 if (ifp->if_flags & IFF_RUNNING) 1967 jme_stop(sc); 1968 } 1969 sc->jme_if_flags = ifp->if_flags; 1970 break; 1971 1972 case SIOCADDMULTI: 1973 case SIOCDELMULTI: 1974 if (ifp->if_flags & IFF_RUNNING) 1975 jme_set_filter(sc); 1976 break; 1977 1978 case SIOCSIFMEDIA: 1979 case SIOCGIFMEDIA: 1980 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1981 break; 1982 1983 case SIOCSIFCAP: 1984 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1985 1986 if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) { 1987 ifp->if_capenable ^= IFCAP_TXCSUM; 1988 if (ifp->if_capenable & IFCAP_TXCSUM) 1989 ifp->if_hwassist |= JME_CSUM_FEATURES; 1990 else 1991 ifp->if_hwassist &= ~JME_CSUM_FEATURES; 1992 } 1993 if (mask & IFCAP_RXCSUM) { 1994 uint32_t reg; 1995 1996 ifp->if_capenable ^= IFCAP_RXCSUM; 1997 reg = CSR_READ_4(sc, JME_RXMAC); 1998 reg &= ~RXMAC_CSUM_ENB; 1999 if (ifp->if_capenable & IFCAP_RXCSUM) 2000 reg |= RXMAC_CSUM_ENB; 2001 CSR_WRITE_4(sc, JME_RXMAC, reg); 2002 } 2003 2004 if (mask & IFCAP_VLAN_HWTAGGING) { 2005 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2006 jme_set_vlan(sc); 2007 } 2008 2009 if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) { 2010 ifp->if_capenable ^= IFCAP_TSO; 2011 if (ifp->if_capenable & IFCAP_TSO) 2012 ifp->if_hwassist |= CSUM_TSO; 2013 else 2014 ifp->if_hwassist &= ~CSUM_TSO; 2015 } 2016 2017 if (mask & IFCAP_RSS) 2018 ifp->if_capenable ^= IFCAP_RSS; 2019 break; 2020 2021 default: 2022 error = ether_ioctl(ifp, cmd, data); 2023 break; 2024 } 2025 return (error); 2026 } 2027 2028 static void 2029 jme_mac_config(struct jme_softc *sc) 2030 { 2031 struct mii_data *mii; 2032 uint32_t ghc, rxmac, txmac, txpause, gp1; 2033 int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0; 2034 2035 mii = device_get_softc(sc->jme_miibus); 2036 2037 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2038 DELAY(10); 2039 CSR_WRITE_4(sc, JME_GHC, 0); 2040 ghc = 0; 2041 rxmac = CSR_READ_4(sc, JME_RXMAC); 2042 rxmac &= ~RXMAC_FC_ENB; 2043 txmac = CSR_READ_4(sc, JME_TXMAC); 2044 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST); 2045 txpause = CSR_READ_4(sc, JME_TXPFC); 2046 txpause &= ~TXPFC_PAUSE_ENB; 2047 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2048 ghc |= GHC_FULL_DUPLEX; 2049 rxmac &= ~RXMAC_COLL_DET_ENB; 2050 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | 2051 TXMAC_BACKOFF | TXMAC_CARRIER_EXT | 2052 TXMAC_FRAME_BURST); 2053 #ifdef notyet 2054 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2055 txpause |= TXPFC_PAUSE_ENB; 2056 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2057 rxmac |= RXMAC_FC_ENB; 2058 #endif 2059 /* Disable retry transmit timer/retry limit. */ 2060 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) & 2061 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB)); 2062 } else { 2063 rxmac |= RXMAC_COLL_DET_ENB; 2064 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF; 2065 /* Enable retry transmit timer/retry limit. */ 2066 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) | 2067 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB); 2068 } 2069 2070 /* 2071 * Reprogram Tx/Rx MACs with resolved speed/duplex. 2072 */ 2073 gp1 = CSR_READ_4(sc, JME_GPREG1); 2074 gp1 &= ~GPREG1_WA_HDX; 2075 2076 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) 2077 hdx = 1; 2078 2079 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2080 case IFM_10_T: 2081 ghc |= GHC_SPEED_10 | sc->jme_clksrc; 2082 if (hdx) 2083 gp1 |= GPREG1_WA_HDX; 2084 break; 2085 2086 case IFM_100_TX: 2087 ghc |= GHC_SPEED_100 | sc->jme_clksrc; 2088 if (hdx) 2089 gp1 |= GPREG1_WA_HDX; 2090 2091 /* 2092 * Use extended FIFO depth to workaround CRC errors 2093 * emitted by chips before JMC250B 2094 */ 2095 phyconf = JMPHY_CONF_EXTFIFO; 2096 break; 2097 2098 case IFM_1000_T: 2099 if (sc->jme_caps & JME_CAP_FASTETH) 2100 break; 2101 2102 ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000; 2103 if (hdx) 2104 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST; 2105 break; 2106 2107 default: 2108 break; 2109 } 2110 CSR_WRITE_4(sc, JME_GHC, ghc); 2111 CSR_WRITE_4(sc, JME_RXMAC, rxmac); 2112 CSR_WRITE_4(sc, JME_TXMAC, txmac); 2113 CSR_WRITE_4(sc, JME_TXPFC, txpause); 2114 2115 if (sc->jme_workaround & JME_WA_EXTFIFO) { 2116 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 2117 JMPHY_CONF, phyconf); 2118 } 2119 if (sc->jme_workaround & JME_WA_HDX) 2120 CSR_WRITE_4(sc, JME_GPREG1, gp1); 2121 } 2122 2123 static void 2124 jme_intr(void *xsc) 2125 { 2126 struct jme_softc *sc = xsc; 2127 struct ifnet *ifp = &sc->arpcom.ac_if; 2128 uint32_t status; 2129 int r; 2130 2131 ASSERT_SERIALIZED(&sc->jme_serialize); 2132 2133 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS); 2134 if (status == 0 || status == 0xFFFFFFFF) 2135 return; 2136 2137 /* Disable interrupts. */ 2138 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 2139 2140 status = CSR_READ_4(sc, JME_INTR_STATUS); 2141 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF) 2142 goto back; 2143 2144 /* Reset PCC counter/timer and Ack interrupts. */ 2145 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP); 2146 2147 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) 2148 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP; 2149 2150 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2151 if (status & jme_rx_status[r].jme_coal) { 2152 status |= jme_rx_status[r].jme_coal | 2153 jme_rx_status[r].jme_comp; 2154 } 2155 } 2156 2157 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 2158 2159 if (ifp->if_flags & IFF_RUNNING) { 2160 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 2161 2162 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) 2163 jme_rx_intr(sc, status); 2164 2165 if (status & INTR_RXQ_DESC_EMPTY) { 2166 /* 2167 * Notify hardware availability of new Rx buffers. 2168 * Reading RXCSR takes very long time under heavy 2169 * load so cache RXCSR value and writes the ORed 2170 * value with the kick command to the RXCSR. This 2171 * saves one register access cycle. 2172 */ 2173 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 2174 RXCSR_RX_ENB | RXCSR_RXQ_START); 2175 } 2176 2177 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) { 2178 lwkt_serialize_enter(&tdata->jme_tx_serialize); 2179 jme_txeof(tdata); 2180 if (!ifq_is_empty(&ifp->if_snd)) 2181 if_devstart(ifp); 2182 lwkt_serialize_exit(&tdata->jme_tx_serialize); 2183 } 2184 } 2185 back: 2186 /* Reenable interrupts. */ 2187 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2188 } 2189 2190 static void 2191 jme_txeof(struct jme_txdata *tdata) 2192 { 2193 struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if; 2194 int cons; 2195 2196 cons = tdata->jme_tx_cons; 2197 if (cons == tdata->jme_tx_prod) 2198 return; 2199 2200 /* 2201 * Go through our Tx list and free mbufs for those 2202 * frames which have been transmitted. 2203 */ 2204 while (cons != tdata->jme_tx_prod) { 2205 struct jme_txdesc *txd, *next_txd; 2206 uint32_t status, next_status; 2207 int next_cons, nsegs; 2208 2209 txd = &tdata->jme_txdesc[cons]; 2210 KASSERT(txd->tx_m != NULL, 2211 ("%s: freeing NULL mbuf!", __func__)); 2212 2213 status = le32toh(txd->tx_desc->flags); 2214 if ((status & JME_TD_OWN) == JME_TD_OWN) 2215 break; 2216 2217 /* 2218 * NOTE: 2219 * This chip will always update the TX descriptor's 2220 * buflen field and this updating always happens 2221 * after clearing the OWN bit, so even if the OWN 2222 * bit is cleared by the chip, we still don't sure 2223 * about whether the buflen field has been updated 2224 * by the chip or not. To avoid this race, we wait 2225 * for the next TX descriptor's OWN bit to be cleared 2226 * by the chip before reusing this TX descriptor. 2227 */ 2228 next_cons = cons; 2229 JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt); 2230 next_txd = &tdata->jme_txdesc[next_cons]; 2231 if (next_txd->tx_m == NULL) 2232 break; 2233 next_status = le32toh(next_txd->tx_desc->flags); 2234 if ((next_status & JME_TD_OWN) == JME_TD_OWN) 2235 break; 2236 2237 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) { 2238 IFNET_STAT_INC(ifp, oerrors, 1); 2239 } else { 2240 IFNET_STAT_INC(ifp, opackets, 1); 2241 if (status & JME_TD_COLLISION) { 2242 IFNET_STAT_INC(ifp, collisions, 2243 le32toh(txd->tx_desc->buflen) & 2244 JME_TD_BUF_LEN_MASK); 2245 } 2246 } 2247 2248 /* 2249 * Only the first descriptor of multi-descriptor 2250 * transmission is updated so driver have to skip entire 2251 * chained buffers for the transmiited frame. In other 2252 * words, JME_TD_OWN bit is valid only at the first 2253 * descriptor of a multi-descriptor transmission. 2254 */ 2255 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 2256 tdata->jme_tx_ring[cons].flags = 0; 2257 JME_DESC_INC(cons, tdata->jme_tx_desc_cnt); 2258 } 2259 2260 /* Reclaim transferred mbufs. */ 2261 bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap); 2262 m_freem(txd->tx_m); 2263 txd->tx_m = NULL; 2264 tdata->jme_tx_cnt -= txd->tx_ndesc; 2265 KASSERT(tdata->jme_tx_cnt >= 0, 2266 ("%s: Active Tx desc counter was garbled", __func__)); 2267 txd->tx_ndesc = 0; 2268 } 2269 tdata->jme_tx_cons = cons; 2270 2271 /* 1 for symbol TX descriptor */ 2272 if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1) 2273 ifp->if_timer = 0; 2274 2275 if (tdata->jme_tx_cnt + JME_TXD_SPARE <= 2276 tdata->jme_tx_desc_cnt - JME_TXD_RSVD) 2277 ifq_clr_oactive(&ifp->if_snd); 2278 } 2279 2280 static __inline void 2281 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count) 2282 { 2283 int i; 2284 2285 for (i = 0; i < count; ++i) { 2286 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]); 2287 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt); 2288 } 2289 } 2290 2291 static __inline struct pktinfo * 2292 jme_pktinfo(struct pktinfo *pi, uint32_t flags) 2293 { 2294 if (flags & JME_RD_IPV4) 2295 pi->pi_netisr = NETISR_IP; 2296 else if (flags & JME_RD_IPV6) 2297 pi->pi_netisr = NETISR_IPV6; 2298 else 2299 return NULL; 2300 2301 pi->pi_flags = 0; 2302 pi->pi_l3proto = IPPROTO_UNKNOWN; 2303 2304 if (flags & JME_RD_MORE_FRAG) 2305 pi->pi_flags |= PKTINFO_FLAG_FRAG; 2306 else if (flags & JME_RD_TCP) 2307 pi->pi_l3proto = IPPROTO_TCP; 2308 else if (flags & JME_RD_UDP) 2309 pi->pi_l3proto = IPPROTO_UDP; 2310 else 2311 pi = NULL; 2312 return pi; 2313 } 2314 2315 /* Receive a frame. */ 2316 static void 2317 jme_rxpkt(struct jme_rxdata *rdata, int cpuid) 2318 { 2319 struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if; 2320 struct jme_desc *desc; 2321 struct jme_rxdesc *rxd; 2322 struct mbuf *mp, *m; 2323 uint32_t flags, status, hash, hashinfo; 2324 int cons, count, nsegs; 2325 2326 cons = rdata->jme_rx_cons; 2327 desc = &rdata->jme_rx_ring[cons]; 2328 2329 flags = le32toh(desc->flags); 2330 status = le32toh(desc->buflen); 2331 hash = le32toh(desc->addr_hi); 2332 hashinfo = le32toh(desc->addr_lo); 2333 nsegs = JME_RX_NSEGS(status); 2334 2335 if (nsegs > 1) { 2336 /* Skip the first descriptor. */ 2337 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt); 2338 2339 /* 2340 * Clear the OWN bit of the following RX descriptors; 2341 * hardware will not clear the OWN bit except the first 2342 * RX descriptor. 2343 * 2344 * Since the first RX descriptor is setup, i.e. OWN bit 2345 * on, before its followins RX descriptors, leaving the 2346 * OWN bit on the following RX descriptors will trick 2347 * the hardware into thinking that the following RX 2348 * descriptors are ready to be used too. 2349 */ 2350 for (count = 1; count < nsegs; count++, 2351 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) 2352 rdata->jme_rx_ring[cons].flags = 0; 2353 2354 cons = rdata->jme_rx_cons; 2355 } 2356 2357 JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, " 2358 "hash 0x%08x, hash info 0x%08x\n", 2359 rdata->jme_rx_idx, flags, hash, hashinfo); 2360 2361 if (status & JME_RX_ERR_STAT) { 2362 IFNET_STAT_INC(ifp, ierrors, 1); 2363 jme_discard_rxbufs(rdata, cons, nsegs); 2364 #ifdef JME_SHOW_ERRORS 2365 if_printf(ifp, "%s : receive error = 0x%b\n", 2366 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS); 2367 #endif 2368 rdata->jme_rx_cons += nsegs; 2369 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt; 2370 return; 2371 } 2372 2373 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES; 2374 for (count = 0; count < nsegs; count++, 2375 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) { 2376 rxd = &rdata->jme_rxdesc[cons]; 2377 mp = rxd->rx_m; 2378 2379 /* Add a new receive buffer to the ring. */ 2380 if (jme_newbuf(rdata, rxd, 0) != 0) { 2381 IFNET_STAT_INC(ifp, iqdrops, 1); 2382 /* Reuse buffer. */ 2383 jme_discard_rxbufs(rdata, cons, nsegs - count); 2384 if (rdata->jme_rxhead != NULL) { 2385 m_freem(rdata->jme_rxhead); 2386 JME_RXCHAIN_RESET(rdata); 2387 } 2388 break; 2389 } 2390 2391 /* 2392 * Assume we've received a full sized frame. 2393 * Actual size is fixed when we encounter the end of 2394 * multi-segmented frame. 2395 */ 2396 mp->m_len = MCLBYTES; 2397 2398 /* Chain received mbufs. */ 2399 if (rdata->jme_rxhead == NULL) { 2400 rdata->jme_rxhead = mp; 2401 rdata->jme_rxtail = mp; 2402 } else { 2403 /* 2404 * Receive processor can receive a maximum frame 2405 * size of 65535 bytes. 2406 */ 2407 rdata->jme_rxtail->m_next = mp; 2408 rdata->jme_rxtail = mp; 2409 } 2410 2411 if (count == nsegs - 1) { 2412 struct pktinfo pi0, *pi; 2413 2414 /* Last desc. for this frame. */ 2415 m = rdata->jme_rxhead; 2416 m->m_pkthdr.len = rdata->jme_rxlen; 2417 if (nsegs > 1) { 2418 /* Set first mbuf size. */ 2419 m->m_len = MCLBYTES - JME_RX_PAD_BYTES; 2420 /* Set last mbuf size. */ 2421 mp->m_len = rdata->jme_rxlen - 2422 ((MCLBYTES - JME_RX_PAD_BYTES) + 2423 (MCLBYTES * (nsegs - 2))); 2424 } else { 2425 m->m_len = rdata->jme_rxlen; 2426 } 2427 m->m_pkthdr.rcvif = ifp; 2428 2429 /* 2430 * Account for 10bytes auto padding which is used 2431 * to align IP header on 32bit boundary. Also note, 2432 * CRC bytes is automatically removed by the 2433 * hardware. 2434 */ 2435 m->m_data += JME_RX_PAD_BYTES; 2436 2437 /* Set checksum information. */ 2438 if ((ifp->if_capenable & IFCAP_RXCSUM) && 2439 (flags & JME_RD_IPV4)) { 2440 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2441 if (flags & JME_RD_IPCSUM) 2442 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2443 if ((flags & JME_RD_MORE_FRAG) == 0 && 2444 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) == 2445 (JME_RD_TCP | JME_RD_TCPCSUM) || 2446 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) == 2447 (JME_RD_UDP | JME_RD_UDPCSUM))) { 2448 m->m_pkthdr.csum_flags |= 2449 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2450 m->m_pkthdr.csum_data = 0xffff; 2451 } 2452 } 2453 2454 /* Check for VLAN tagged packets. */ 2455 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) && 2456 (flags & JME_RD_VLAN_TAG)) { 2457 m->m_pkthdr.ether_vlantag = 2458 flags & JME_RD_VLAN_MASK; 2459 m->m_flags |= M_VLANTAG; 2460 } 2461 2462 IFNET_STAT_INC(ifp, ipackets, 1); 2463 2464 if (ifp->if_capenable & IFCAP_RSS) 2465 pi = jme_pktinfo(&pi0, flags); 2466 else 2467 pi = NULL; 2468 2469 if (pi != NULL && 2470 (hashinfo & JME_RD_HASH_FN_MASK) == 2471 JME_RD_HASH_FN_TOEPLITZ) { 2472 m->m_flags |= (M_HASH | M_CKHASH); 2473 m->m_pkthdr.hash = toeplitz_hash(hash); 2474 } 2475 2476 #ifdef JME_RSS_DEBUG 2477 if (pi != NULL) { 2478 JME_RSS_DPRINTF(rdata->jme_sc, 10, 2479 "isr %d flags %08x, l3 %d %s\n", 2480 pi->pi_netisr, pi->pi_flags, 2481 pi->pi_l3proto, 2482 (m->m_flags & M_HASH) ? "hash" : ""); 2483 } 2484 #endif 2485 2486 /* Pass it on. */ 2487 ifp->if_input(ifp, m, pi, cpuid); 2488 2489 /* Reset mbuf chains. */ 2490 JME_RXCHAIN_RESET(rdata); 2491 #ifdef JME_RSS_DEBUG 2492 rdata->jme_rx_pkt++; 2493 #endif 2494 } 2495 } 2496 2497 rdata->jme_rx_cons += nsegs; 2498 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt; 2499 } 2500 2501 static void 2502 jme_rxeof(struct jme_rxdata *rdata, int count, int cpuid) 2503 { 2504 struct jme_desc *desc; 2505 int nsegs, pktlen; 2506 2507 for (;;) { 2508 #ifdef IFPOLL_ENABLE 2509 if (count >= 0 && count-- == 0) 2510 break; 2511 #endif 2512 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons]; 2513 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN) 2514 break; 2515 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0) 2516 break; 2517 2518 /* 2519 * Check number of segments against received bytes. 2520 * Non-matching value would indicate that hardware 2521 * is still trying to update Rx descriptors. I'm not 2522 * sure whether this check is needed. 2523 */ 2524 nsegs = JME_RX_NSEGS(le32toh(desc->buflen)); 2525 pktlen = JME_RX_BYTES(le32toh(desc->buflen)); 2526 if (nsegs != howmany(pktlen, MCLBYTES)) { 2527 if_printf(&rdata->jme_sc->arpcom.ac_if, 2528 "RX fragment count(%d) and " 2529 "packet size(%d) mismach\n", nsegs, pktlen); 2530 break; 2531 } 2532 2533 /* 2534 * NOTE: 2535 * RSS hash and hash information may _not_ be set by the 2536 * hardware even if the OWN bit is cleared and VALID bit 2537 * is set. 2538 * 2539 * If the RSS information is not delivered by the hardware 2540 * yet, we MUST NOT accept this packet, let alone reusing 2541 * its RX descriptor. If this packet was accepted and its 2542 * RX descriptor was reused before hardware delivering the 2543 * RSS information, the RX buffer's address would be trashed 2544 * by the RSS information delivered by the hardware. 2545 */ 2546 if (JME_ENABLE_HWRSS(rdata->jme_sc)) { 2547 struct jme_rxdesc *rxd; 2548 uint32_t hashinfo; 2549 2550 hashinfo = le32toh(desc->addr_lo); 2551 rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons]; 2552 2553 /* 2554 * This test should be enough to detect the pending 2555 * RSS information delivery, given: 2556 * - If RSS hash is not calculated, the hashinfo 2557 * will be 0. Howvever, the lower 32bits of RX 2558 * buffers' physical address will never be 0. 2559 * (see jme_rxbuf_dma_filter) 2560 * - If RSS hash is calculated, the lowest 4 bits 2561 * of hashinfo will be set, while the RX buffers 2562 * are at least 2K aligned. 2563 */ 2564 if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) { 2565 #ifdef JME_SHOW_RSSWB 2566 if_printf(&rdata->jme_sc->arpcom.ac_if, 2567 "RSS is not written back yet\n"); 2568 #endif 2569 break; 2570 } 2571 } 2572 2573 /* Received a frame. */ 2574 jme_rxpkt(rdata, cpuid); 2575 } 2576 } 2577 2578 static void 2579 jme_tick(void *xsc) 2580 { 2581 struct jme_softc *sc = xsc; 2582 struct mii_data *mii = device_get_softc(sc->jme_miibus); 2583 2584 lwkt_serialize_enter(&sc->jme_serialize); 2585 2586 KKASSERT(mycpuid == JME_TICK_CPUID); 2587 2588 sc->jme_in_tick = TRUE; 2589 mii_tick(mii); 2590 sc->jme_in_tick = FALSE; 2591 2592 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2593 2594 lwkt_serialize_exit(&sc->jme_serialize); 2595 } 2596 2597 static void 2598 jme_reset(struct jme_softc *sc) 2599 { 2600 uint32_t val; 2601 2602 /* Make sure that TX and RX are stopped */ 2603 jme_stop_tx(sc); 2604 jme_stop_rx(sc); 2605 2606 /* Start reset */ 2607 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2608 DELAY(20); 2609 2610 /* 2611 * Hold reset bit before stop reset 2612 */ 2613 2614 /* Disable TXMAC and TXOFL clock sources */ 2615 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2616 /* Disable RXMAC clock source */ 2617 val = CSR_READ_4(sc, JME_GPREG1); 2618 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC); 2619 /* Flush */ 2620 CSR_READ_4(sc, JME_GHC); 2621 2622 /* Stop reset */ 2623 CSR_WRITE_4(sc, JME_GHC, 0); 2624 /* Flush */ 2625 CSR_READ_4(sc, JME_GHC); 2626 2627 /* 2628 * Clear reset bit after stop reset 2629 */ 2630 2631 /* Enable TXMAC and TXOFL clock sources */ 2632 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC); 2633 /* Enable RXMAC clock source */ 2634 val = CSR_READ_4(sc, JME_GPREG1); 2635 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC); 2636 /* Flush */ 2637 CSR_READ_4(sc, JME_GHC); 2638 2639 /* Disable TXMAC and TXOFL clock sources */ 2640 CSR_WRITE_4(sc, JME_GHC, 0); 2641 /* Disable RXMAC clock source */ 2642 val = CSR_READ_4(sc, JME_GPREG1); 2643 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC); 2644 /* Flush */ 2645 CSR_READ_4(sc, JME_GHC); 2646 2647 /* Enable TX and RX */ 2648 val = CSR_READ_4(sc, JME_TXCSR); 2649 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB); 2650 val = CSR_READ_4(sc, JME_RXCSR); 2651 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB); 2652 /* Flush */ 2653 CSR_READ_4(sc, JME_TXCSR); 2654 CSR_READ_4(sc, JME_RXCSR); 2655 2656 /* Enable TXMAC and TXOFL clock sources */ 2657 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC); 2658 /* Disable RXMAC clock source */ 2659 val = CSR_READ_4(sc, JME_GPREG1); 2660 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC); 2661 /* Flush */ 2662 CSR_READ_4(sc, JME_GHC); 2663 2664 /* Stop TX and RX */ 2665 jme_stop_tx(sc); 2666 jme_stop_rx(sc); 2667 } 2668 2669 static void 2670 jme_init(void *xsc) 2671 { 2672 struct jme_softc *sc = xsc; 2673 struct ifnet *ifp = &sc->arpcom.ac_if; 2674 struct mii_data *mii; 2675 uint8_t eaddr[ETHER_ADDR_LEN]; 2676 bus_addr_t paddr; 2677 uint32_t reg; 2678 int error, r; 2679 2680 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2681 2682 /* 2683 * Cancel any pending I/O. 2684 */ 2685 jme_stop(sc); 2686 2687 /* 2688 * Reset the chip to a known state. 2689 */ 2690 jme_reset(sc); 2691 2692 /* 2693 * Setup MSI/MSI-X vectors to interrupts mapping 2694 */ 2695 jme_set_msinum(sc); 2696 2697 if (JME_ENABLE_HWRSS(sc)) 2698 jme_enable_rss(sc); 2699 else 2700 jme_disable_rss(sc); 2701 2702 /* Init RX descriptors */ 2703 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2704 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]); 2705 if (error) { 2706 if_printf(ifp, "initialization failed: " 2707 "no memory for %dth RX ring.\n", r); 2708 jme_stop(sc); 2709 return; 2710 } 2711 } 2712 2713 /* Init TX descriptors */ 2714 jme_init_tx_ring(&sc->jme_cdata.jme_tx_data); 2715 2716 /* Initialize shadow status block. */ 2717 jme_init_ssb(sc); 2718 2719 /* Reprogram the station address. */ 2720 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2721 CSR_WRITE_4(sc, JME_PAR0, 2722 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); 2723 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]); 2724 2725 /* 2726 * Configure Tx queue. 2727 * Tx priority queue weight value : 0 2728 * Tx FIFO threshold for processing next packet : 16QW 2729 * Maximum Tx DMA length : 512 2730 * Allow Tx DMA burst. 2731 */ 2732 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); 2733 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); 2734 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; 2735 sc->jme_txcsr |= sc->jme_tx_dma_size; 2736 sc->jme_txcsr |= TXCSR_DMA_BURST; 2737 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 2738 2739 /* Set Tx descriptor counter. */ 2740 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt); 2741 2742 /* Set Tx ring address to the hardware. */ 2743 paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr; 2744 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 2745 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 2746 2747 /* Configure TxMAC parameters. */ 2748 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB; 2749 reg |= TXMAC_THRESH_1_PKT; 2750 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB; 2751 CSR_WRITE_4(sc, JME_TXMAC, reg); 2752 2753 /* 2754 * Configure Rx queue. 2755 * FIFO full threshold for transmitting Tx pause packet : 128T 2756 * FIFO threshold for processing next packet : 128QW 2757 * Rx queue 0 select 2758 * Max Rx DMA length : 128 2759 * Rx descriptor retry : 32 2760 * Rx descriptor retry time gap : 256ns 2761 * Don't receive runt/bad frame. 2762 */ 2763 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T; 2764 #if 0 2765 /* 2766 * Since Rx FIFO size is 4K bytes, receiving frames larger 2767 * than 4K bytes will suffer from Rx FIFO overruns. So 2768 * decrease FIFO threshold to reduce the FIFO overruns for 2769 * frames larger than 4000 bytes. 2770 * For best performance of standard MTU sized frames use 2771 * maximum allowable FIFO threshold, 128QW. 2772 */ 2773 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) > 2774 JME_RX_FIFO_SIZE) 2775 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2776 else 2777 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW; 2778 #else 2779 /* Improve PCI Express compatibility */ 2780 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2781 #endif 2782 sc->jme_rxcsr |= sc->jme_rx_dma_size; 2783 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT); 2784 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK; 2785 /* XXX TODO DROP_BAD */ 2786 2787 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2788 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 2789 2790 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r)); 2791 2792 /* Set Rx descriptor counter. */ 2793 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt); 2794 2795 /* Set Rx ring address to the hardware. */ 2796 paddr = rdata->jme_rx_ring_paddr; 2797 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 2798 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 2799 } 2800 2801 /* Clear receive filter. */ 2802 CSR_WRITE_4(sc, JME_RXMAC, 0); 2803 2804 /* Set up the receive filter. */ 2805 jme_set_filter(sc); 2806 jme_set_vlan(sc); 2807 2808 /* 2809 * Disable all WOL bits as WOL can interfere normal Rx 2810 * operation. Also clear WOL detection status bits. 2811 */ 2812 reg = CSR_READ_4(sc, JME_PMCS); 2813 reg &= ~PMCS_WOL_ENB_MASK; 2814 CSR_WRITE_4(sc, JME_PMCS, reg); 2815 2816 /* 2817 * Pad 10bytes right before received frame. This will greatly 2818 * help Rx performance on strict-alignment architectures as 2819 * it does not need to copy the frame to align the payload. 2820 */ 2821 reg = CSR_READ_4(sc, JME_RXMAC); 2822 reg |= RXMAC_PAD_10BYTES; 2823 2824 if (ifp->if_capenable & IFCAP_RXCSUM) 2825 reg |= RXMAC_CSUM_ENB; 2826 CSR_WRITE_4(sc, JME_RXMAC, reg); 2827 2828 /* Configure general purpose reg0 */ 2829 reg = CSR_READ_4(sc, JME_GPREG0); 2830 reg &= ~GPREG0_PCC_UNIT_MASK; 2831 /* Set PCC timer resolution to micro-seconds unit. */ 2832 reg |= GPREG0_PCC_UNIT_US; 2833 /* 2834 * Disable all shadow register posting as we have to read 2835 * JME_INTR_STATUS register in jme_intr. Also it seems 2836 * that it's hard to synchronize interrupt status between 2837 * hardware and software with shadow posting due to 2838 * requirements of bus_dmamap_sync(9). 2839 */ 2840 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 2841 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 2842 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 2843 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 2844 /* Disable posting of DW0. */ 2845 reg &= ~GPREG0_POST_DW0_ENB; 2846 /* Clear PME message. */ 2847 reg &= ~GPREG0_PME_ENB; 2848 /* Set PHY address. */ 2849 reg &= ~GPREG0_PHY_ADDR_MASK; 2850 reg |= sc->jme_phyaddr; 2851 CSR_WRITE_4(sc, JME_GPREG0, reg); 2852 2853 /* Configure Tx queue 0 packet completion coalescing. */ 2854 jme_set_tx_coal(sc); 2855 2856 /* Configure Rx queues packet completion coalescing. */ 2857 jme_set_rx_coal(sc); 2858 2859 /* Configure shadow status block but don't enable posting. */ 2860 paddr = sc->jme_cdata.jme_ssb_block_paddr; 2861 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); 2862 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); 2863 2864 /* Disable Timer 1 and Timer 2. */ 2865 CSR_WRITE_4(sc, JME_TIMER1, 0); 2866 CSR_WRITE_4(sc, JME_TIMER2, 0); 2867 2868 /* Configure retry transmit period, retry limit value. */ 2869 CSR_WRITE_4(sc, JME_TXTRHD, 2870 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 2871 TXTRHD_RT_PERIOD_MASK) | 2872 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 2873 TXTRHD_RT_LIMIT_SHIFT)); 2874 2875 #ifdef IFPOLL_ENABLE 2876 if (!(ifp->if_flags & IFF_NPOLLING)) 2877 #endif 2878 /* Initialize the interrupt mask. */ 2879 jme_enable_intr(sc); 2880 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2881 2882 /* 2883 * Enabling Tx/Rx DMA engines and Rx queue processing is 2884 * done after detection of valid link in jme_miibus_statchg. 2885 */ 2886 sc->jme_has_link = FALSE; 2887 2888 jme_phy_init(sc); 2889 2890 /* Set the current media. */ 2891 mii = device_get_softc(sc->jme_miibus); 2892 mii_mediachg(mii); 2893 2894 callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc, 2895 JME_TICK_CPUID); 2896 2897 ifp->if_flags |= IFF_RUNNING; 2898 ifq_clr_oactive(&ifp->if_snd); 2899 } 2900 2901 static void 2902 jme_stop(struct jme_softc *sc) 2903 { 2904 struct ifnet *ifp = &sc->arpcom.ac_if; 2905 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 2906 struct jme_txdesc *txd; 2907 struct jme_rxdesc *rxd; 2908 struct jme_rxdata *rdata; 2909 int i, r; 2910 2911 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2912 2913 /* 2914 * Mark the interface down and cancel the watchdog timer. 2915 */ 2916 ifp->if_flags &= ~IFF_RUNNING; 2917 ifq_clr_oactive(&ifp->if_snd); 2918 ifp->if_timer = 0; 2919 2920 callout_stop(&sc->jme_tick_ch); 2921 sc->jme_has_link = FALSE; 2922 2923 /* 2924 * Disable interrupts. 2925 */ 2926 jme_disable_intr(sc); 2927 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2928 2929 /* Disable updating shadow status block. */ 2930 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, 2931 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB); 2932 2933 /* Stop receiver, transmitter. */ 2934 jme_stop_rx(sc); 2935 jme_stop_tx(sc); 2936 2937 /* 2938 * Free partial finished RX segments 2939 */ 2940 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2941 rdata = &sc->jme_cdata.jme_rx_data[r]; 2942 if (rdata->jme_rxhead != NULL) 2943 m_freem(rdata->jme_rxhead); 2944 JME_RXCHAIN_RESET(rdata); 2945 } 2946 2947 /* 2948 * Free RX and TX mbufs still in the queues. 2949 */ 2950 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2951 rdata = &sc->jme_cdata.jme_rx_data[r]; 2952 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 2953 rxd = &rdata->jme_rxdesc[i]; 2954 if (rxd->rx_m != NULL) { 2955 bus_dmamap_unload(rdata->jme_rx_tag, 2956 rxd->rx_dmamap); 2957 m_freem(rxd->rx_m); 2958 rxd->rx_m = NULL; 2959 } 2960 } 2961 } 2962 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 2963 txd = &tdata->jme_txdesc[i]; 2964 if (txd->tx_m != NULL) { 2965 bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap); 2966 m_freem(txd->tx_m); 2967 txd->tx_m = NULL; 2968 txd->tx_ndesc = 0; 2969 } 2970 } 2971 } 2972 2973 static void 2974 jme_stop_tx(struct jme_softc *sc) 2975 { 2976 uint32_t reg; 2977 int i; 2978 2979 reg = CSR_READ_4(sc, JME_TXCSR); 2980 if ((reg & TXCSR_TX_ENB) == 0) 2981 return; 2982 reg &= ~TXCSR_TX_ENB; 2983 CSR_WRITE_4(sc, JME_TXCSR, reg); 2984 for (i = JME_TIMEOUT; i > 0; i--) { 2985 DELAY(1); 2986 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0) 2987 break; 2988 } 2989 if (i == 0) 2990 device_printf(sc->jme_dev, "stopping transmitter timeout!\n"); 2991 } 2992 2993 static void 2994 jme_stop_rx(struct jme_softc *sc) 2995 { 2996 uint32_t reg; 2997 int i; 2998 2999 reg = CSR_READ_4(sc, JME_RXCSR); 3000 if ((reg & RXCSR_RX_ENB) == 0) 3001 return; 3002 reg &= ~RXCSR_RX_ENB; 3003 CSR_WRITE_4(sc, JME_RXCSR, reg); 3004 for (i = JME_TIMEOUT; i > 0; i--) { 3005 DELAY(1); 3006 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0) 3007 break; 3008 } 3009 if (i == 0) 3010 device_printf(sc->jme_dev, "stopping receiver timeout!\n"); 3011 } 3012 3013 static void 3014 jme_init_tx_ring(struct jme_txdata *tdata) 3015 { 3016 struct jme_txdesc *txd; 3017 int i; 3018 3019 tdata->jme_tx_prod = 0; 3020 tdata->jme_tx_cons = 0; 3021 tdata->jme_tx_cnt = 0; 3022 3023 bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata)); 3024 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 3025 txd = &tdata->jme_txdesc[i]; 3026 txd->tx_m = NULL; 3027 txd->tx_desc = &tdata->jme_tx_ring[i]; 3028 txd->tx_ndesc = 0; 3029 } 3030 } 3031 3032 static void 3033 jme_init_ssb(struct jme_softc *sc) 3034 { 3035 struct jme_chain_data *cd; 3036 3037 cd = &sc->jme_cdata; 3038 bzero(cd->jme_ssb_block, JME_SSB_SIZE); 3039 } 3040 3041 static int 3042 jme_init_rx_ring(struct jme_rxdata *rdata) 3043 { 3044 struct jme_rxdesc *rxd; 3045 int i; 3046 3047 KKASSERT(rdata->jme_rxhead == NULL && 3048 rdata->jme_rxtail == NULL && 3049 rdata->jme_rxlen == 0); 3050 rdata->jme_rx_cons = 0; 3051 3052 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata)); 3053 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 3054 int error; 3055 3056 rxd = &rdata->jme_rxdesc[i]; 3057 rxd->rx_m = NULL; 3058 rxd->rx_desc = &rdata->jme_rx_ring[i]; 3059 error = jme_newbuf(rdata, rxd, 1); 3060 if (error) 3061 return error; 3062 } 3063 return 0; 3064 } 3065 3066 static int 3067 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init) 3068 { 3069 struct mbuf *m; 3070 bus_dma_segment_t segs; 3071 bus_dmamap_t map; 3072 int error, nsegs; 3073 3074 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 3075 if (m == NULL) 3076 return ENOBUFS; 3077 /* 3078 * JMC250 has 64bit boundary alignment limitation so jme(4) 3079 * takes advantage of 10 bytes padding feature of hardware 3080 * in order not to copy entire frame to align IP header on 3081 * 32bit boundary. 3082 */ 3083 m->m_len = m->m_pkthdr.len = MCLBYTES; 3084 3085 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag, 3086 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs, 3087 BUS_DMA_NOWAIT); 3088 if (error) { 3089 m_freem(m); 3090 if (init) { 3091 if_printf(&rdata->jme_sc->arpcom.ac_if, 3092 "can't load RX mbuf\n"); 3093 } 3094 return error; 3095 } 3096 3097 if (rxd->rx_m != NULL) { 3098 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap, 3099 BUS_DMASYNC_POSTREAD); 3100 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap); 3101 } 3102 map = rxd->rx_dmamap; 3103 rxd->rx_dmamap = rdata->jme_rx_sparemap; 3104 rdata->jme_rx_sparemap = map; 3105 rxd->rx_m = m; 3106 rxd->rx_paddr = segs.ds_addr; 3107 3108 jme_setup_rxdesc(rxd); 3109 return 0; 3110 } 3111 3112 static void 3113 jme_set_vlan(struct jme_softc *sc) 3114 { 3115 struct ifnet *ifp = &sc->arpcom.ac_if; 3116 uint32_t reg; 3117 3118 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3119 3120 reg = CSR_READ_4(sc, JME_RXMAC); 3121 reg &= ~RXMAC_VLAN_ENB; 3122 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 3123 reg |= RXMAC_VLAN_ENB; 3124 CSR_WRITE_4(sc, JME_RXMAC, reg); 3125 } 3126 3127 static void 3128 jme_set_filter(struct jme_softc *sc) 3129 { 3130 struct ifnet *ifp = &sc->arpcom.ac_if; 3131 struct ifmultiaddr *ifma; 3132 uint32_t crc; 3133 uint32_t mchash[2]; 3134 uint32_t rxcfg; 3135 3136 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3137 3138 rxcfg = CSR_READ_4(sc, JME_RXMAC); 3139 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST | 3140 RXMAC_ALLMULTI); 3141 3142 /* 3143 * Always accept frames destined to our station address. 3144 * Always accept broadcast frames. 3145 */ 3146 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST; 3147 3148 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 3149 if (ifp->if_flags & IFF_PROMISC) 3150 rxcfg |= RXMAC_PROMISC; 3151 if (ifp->if_flags & IFF_ALLMULTI) 3152 rxcfg |= RXMAC_ALLMULTI; 3153 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); 3154 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); 3155 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3156 return; 3157 } 3158 3159 /* 3160 * Set up the multicast address filter by passing all multicast 3161 * addresses through a CRC generator, and then using the low-order 3162 * 6 bits as an index into the 64 bit multicast hash table. The 3163 * high order bits select the register, while the rest of the bits 3164 * select the bit within the register. 3165 */ 3166 rxcfg |= RXMAC_MULTICAST; 3167 bzero(mchash, sizeof(mchash)); 3168 3169 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3170 if (ifma->ifma_addr->sa_family != AF_LINK) 3171 continue; 3172 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3173 ifma->ifma_addr), ETHER_ADDR_LEN); 3174 3175 /* Just want the 6 least significant bits. */ 3176 crc &= 0x3f; 3177 3178 /* Set the corresponding bit in the hash table. */ 3179 mchash[crc >> 5] |= 1 << (crc & 0x1f); 3180 } 3181 3182 CSR_WRITE_4(sc, JME_MAR0, mchash[0]); 3183 CSR_WRITE_4(sc, JME_MAR1, mchash[1]); 3184 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3185 } 3186 3187 static int 3188 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS) 3189 { 3190 struct jme_softc *sc = arg1; 3191 struct ifnet *ifp = &sc->arpcom.ac_if; 3192 int error, v; 3193 3194 ifnet_serialize_all(ifp); 3195 3196 v = sc->jme_tx_coal_to; 3197 error = sysctl_handle_int(oidp, &v, 0, req); 3198 if (error || req->newptr == NULL) 3199 goto back; 3200 3201 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) { 3202 error = EINVAL; 3203 goto back; 3204 } 3205 3206 if (v != sc->jme_tx_coal_to) { 3207 sc->jme_tx_coal_to = v; 3208 if (ifp->if_flags & IFF_RUNNING) 3209 jme_set_tx_coal(sc); 3210 } 3211 back: 3212 ifnet_deserialize_all(ifp); 3213 return error; 3214 } 3215 3216 static int 3217 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS) 3218 { 3219 struct jme_softc *sc = arg1; 3220 struct ifnet *ifp = &sc->arpcom.ac_if; 3221 int error, v; 3222 3223 ifnet_serialize_all(ifp); 3224 3225 v = sc->jme_tx_coal_pkt; 3226 error = sysctl_handle_int(oidp, &v, 0, req); 3227 if (error || req->newptr == NULL) 3228 goto back; 3229 3230 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) { 3231 error = EINVAL; 3232 goto back; 3233 } 3234 3235 if (v != sc->jme_tx_coal_pkt) { 3236 sc->jme_tx_coal_pkt = v; 3237 if (ifp->if_flags & IFF_RUNNING) 3238 jme_set_tx_coal(sc); 3239 } 3240 back: 3241 ifnet_deserialize_all(ifp); 3242 return error; 3243 } 3244 3245 static int 3246 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS) 3247 { 3248 struct jme_softc *sc = arg1; 3249 struct ifnet *ifp = &sc->arpcom.ac_if; 3250 int error, v; 3251 3252 ifnet_serialize_all(ifp); 3253 3254 v = sc->jme_rx_coal_to; 3255 error = sysctl_handle_int(oidp, &v, 0, req); 3256 if (error || req->newptr == NULL) 3257 goto back; 3258 3259 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) { 3260 error = EINVAL; 3261 goto back; 3262 } 3263 3264 if (v != sc->jme_rx_coal_to) { 3265 sc->jme_rx_coal_to = v; 3266 if (ifp->if_flags & IFF_RUNNING) 3267 jme_set_rx_coal(sc); 3268 } 3269 back: 3270 ifnet_deserialize_all(ifp); 3271 return error; 3272 } 3273 3274 static int 3275 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS) 3276 { 3277 struct jme_softc *sc = arg1; 3278 struct ifnet *ifp = &sc->arpcom.ac_if; 3279 int error, v; 3280 3281 ifnet_serialize_all(ifp); 3282 3283 v = sc->jme_rx_coal_pkt; 3284 error = sysctl_handle_int(oidp, &v, 0, req); 3285 if (error || req->newptr == NULL) 3286 goto back; 3287 3288 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) { 3289 error = EINVAL; 3290 goto back; 3291 } 3292 3293 if (v != sc->jme_rx_coal_pkt) { 3294 sc->jme_rx_coal_pkt = v; 3295 if (ifp->if_flags & IFF_RUNNING) 3296 jme_set_rx_coal(sc); 3297 } 3298 back: 3299 ifnet_deserialize_all(ifp); 3300 return error; 3301 } 3302 3303 static void 3304 jme_set_tx_coal(struct jme_softc *sc) 3305 { 3306 uint32_t reg; 3307 3308 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) & 3309 PCCTX_COAL_TO_MASK; 3310 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) & 3311 PCCTX_COAL_PKT_MASK; 3312 reg |= PCCTX_COAL_TXQ0; 3313 CSR_WRITE_4(sc, JME_PCCTX, reg); 3314 } 3315 3316 static void 3317 jme_set_rx_coal(struct jme_softc *sc) 3318 { 3319 uint32_t reg; 3320 int r; 3321 3322 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) & 3323 PCCRX_COAL_TO_MASK; 3324 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) & 3325 PCCRX_COAL_PKT_MASK; 3326 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) 3327 CSR_WRITE_4(sc, JME_PCCRX(r), reg); 3328 } 3329 3330 #ifdef IFPOLL_ENABLE 3331 3332 static void 3333 jme_npoll_status(struct ifnet *ifp) 3334 { 3335 struct jme_softc *sc = ifp->if_softc; 3336 uint32_t status; 3337 3338 ASSERT_SERIALIZED(&sc->jme_serialize); 3339 3340 status = CSR_READ_4(sc, JME_INTR_STATUS); 3341 if (status & INTR_RXQ_DESC_EMPTY) { 3342 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY); 3343 jme_rx_restart(sc, status); 3344 } 3345 } 3346 3347 static void 3348 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3349 { 3350 struct jme_rxdata *rdata = arg; 3351 3352 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3353 3354 jme_rxeof(rdata, cycle, mycpuid); 3355 } 3356 3357 static void 3358 jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 3359 { 3360 struct jme_txdata *tdata = arg; 3361 3362 ASSERT_SERIALIZED(&tdata->jme_tx_serialize); 3363 3364 jme_txeof(tdata); 3365 if (!ifq_is_empty(&ifp->if_snd)) 3366 if_devstart(ifp); 3367 } 3368 3369 static void 3370 jme_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3371 { 3372 struct jme_softc *sc = ifp->if_softc; 3373 3374 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3375 3376 if (info) { 3377 int i, off; 3378 3379 info->ifpi_status.status_func = jme_npoll_status; 3380 info->ifpi_status.serializer = &sc->jme_serialize; 3381 3382 off = sc->jme_npoll_txoff; 3383 KKASSERT(off <= ncpus2); 3384 info->ifpi_tx[off].poll_func = jme_npoll_tx; 3385 info->ifpi_tx[off].arg = &sc->jme_cdata.jme_tx_data; 3386 info->ifpi_tx[off].serializer = 3387 &sc->jme_cdata.jme_tx_data.jme_tx_serialize; 3388 ifq_set_cpuid(&ifp->if_snd, sc->jme_npoll_txoff); 3389 3390 off = sc->jme_npoll_rxoff; 3391 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 3392 struct jme_rxdata *rdata = 3393 &sc->jme_cdata.jme_rx_data[i]; 3394 int idx = i + off; 3395 3396 info->ifpi_rx[idx].poll_func = jme_npoll_rx; 3397 info->ifpi_rx[idx].arg = rdata; 3398 info->ifpi_rx[idx].serializer = 3399 &rdata->jme_rx_serialize; 3400 } 3401 3402 if (ifp->if_flags & IFF_RUNNING) 3403 jme_disable_intr(sc); 3404 } else { 3405 ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid); 3406 if (ifp->if_flags & IFF_RUNNING) 3407 jme_enable_intr(sc); 3408 } 3409 } 3410 3411 static int 3412 jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 3413 { 3414 struct jme_softc *sc = (void *)arg1; 3415 struct ifnet *ifp = &sc->arpcom.ac_if; 3416 int error, off; 3417 3418 off = sc->jme_npoll_rxoff; 3419 error = sysctl_handle_int(oidp, &off, 0, req); 3420 if (error || req->newptr == NULL) 3421 return error; 3422 if (off < 0) 3423 return EINVAL; 3424 3425 ifnet_serialize_all(ifp); 3426 if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) { 3427 error = EINVAL; 3428 } else { 3429 error = 0; 3430 sc->jme_npoll_rxoff = off; 3431 } 3432 ifnet_deserialize_all(ifp); 3433 3434 return error; 3435 } 3436 3437 static int 3438 jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS) 3439 { 3440 struct jme_softc *sc = (void *)arg1; 3441 struct ifnet *ifp = &sc->arpcom.ac_if; 3442 int error, off; 3443 3444 off = sc->jme_npoll_txoff; 3445 error = sysctl_handle_int(oidp, &off, 0, req); 3446 if (error || req->newptr == NULL) 3447 return error; 3448 if (off < 0) 3449 return EINVAL; 3450 3451 ifnet_serialize_all(ifp); 3452 if (off >= ncpus2) { 3453 error = EINVAL; 3454 } else { 3455 error = 0; 3456 sc->jme_npoll_txoff = off; 3457 } 3458 ifnet_deserialize_all(ifp); 3459 3460 return error; 3461 } 3462 3463 #endif /* IFPOLL_ENABLE */ 3464 3465 static int 3466 jme_rxring_dma_alloc(struct jme_rxdata *rdata) 3467 { 3468 bus_dmamem_t dmem; 3469 int error, asize; 3470 3471 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN); 3472 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag, 3473 JME_RX_RING_ALIGN, 0, 3474 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3475 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 3476 if (error) { 3477 device_printf(rdata->jme_sc->jme_dev, 3478 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx); 3479 return error; 3480 } 3481 rdata->jme_rx_ring_tag = dmem.dmem_tag; 3482 rdata->jme_rx_ring_map = dmem.dmem_map; 3483 rdata->jme_rx_ring = dmem.dmem_addr; 3484 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr; 3485 3486 return 0; 3487 } 3488 3489 static int 3490 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr) 3491 { 3492 if ((paddr & 0xffffffff) == 0) { 3493 /* 3494 * Don't allow lower 32bits of the RX buffer's 3495 * physical address to be 0, else it will break 3496 * hardware pending RSS information delivery 3497 * detection on RX path. 3498 */ 3499 return 1; 3500 } 3501 return 0; 3502 } 3503 3504 static int 3505 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata) 3506 { 3507 bus_addr_t lowaddr; 3508 int i, error; 3509 3510 lowaddr = BUS_SPACE_MAXADDR; 3511 if (JME_ENABLE_HWRSS(rdata->jme_sc)) { 3512 /* jme_rxbuf_dma_filter will be called */ 3513 lowaddr = BUS_SPACE_MAXADDR_32BIT; 3514 } 3515 3516 /* Create tag for Rx buffers. */ 3517 error = bus_dma_tag_create( 3518 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */ 3519 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 3520 lowaddr, /* lowaddr */ 3521 BUS_SPACE_MAXADDR, /* highaddr */ 3522 jme_rxbuf_dma_filter, NULL, /* filter, filterarg */ 3523 MCLBYTES, /* maxsize */ 3524 1, /* nsegments */ 3525 MCLBYTES, /* maxsegsize */ 3526 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */ 3527 &rdata->jme_rx_tag); 3528 if (error) { 3529 device_printf(rdata->jme_sc->jme_dev, 3530 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx); 3531 return error; 3532 } 3533 3534 /* Create DMA maps for Rx buffers. */ 3535 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3536 &rdata->jme_rx_sparemap); 3537 if (error) { 3538 device_printf(rdata->jme_sc->jme_dev, 3539 "could not create %dth spare Rx dmamap.\n", 3540 rdata->jme_rx_idx); 3541 bus_dma_tag_destroy(rdata->jme_rx_tag); 3542 rdata->jme_rx_tag = NULL; 3543 return error; 3544 } 3545 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 3546 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i]; 3547 3548 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3549 &rxd->rx_dmamap); 3550 if (error) { 3551 int j; 3552 3553 device_printf(rdata->jme_sc->jme_dev, 3554 "could not create %dth Rx dmamap " 3555 "for %dth RX ring.\n", i, rdata->jme_rx_idx); 3556 3557 for (j = 0; j < i; ++j) { 3558 rxd = &rdata->jme_rxdesc[j]; 3559 bus_dmamap_destroy(rdata->jme_rx_tag, 3560 rxd->rx_dmamap); 3561 } 3562 bus_dmamap_destroy(rdata->jme_rx_tag, 3563 rdata->jme_rx_sparemap); 3564 bus_dma_tag_destroy(rdata->jme_rx_tag); 3565 rdata->jme_rx_tag = NULL; 3566 return error; 3567 } 3568 } 3569 return 0; 3570 } 3571 3572 static void 3573 jme_rx_intr(struct jme_softc *sc, uint32_t status) 3574 { 3575 int r, cpuid = mycpuid; 3576 3577 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 3578 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 3579 3580 if (status & rdata->jme_rx_coal) { 3581 lwkt_serialize_enter(&rdata->jme_rx_serialize); 3582 jme_rxeof(rdata, -1, cpuid); 3583 lwkt_serialize_exit(&rdata->jme_rx_serialize); 3584 } 3585 } 3586 } 3587 3588 static void 3589 jme_enable_rss(struct jme_softc *sc) 3590 { 3591 uint32_t rssc, ind; 3592 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE]; 3593 int i; 3594 3595 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 || 3596 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4, 3597 ("%s: invalid # of RX rings (%d)", 3598 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt)); 3599 3600 rssc = RSSC_HASH_64_ENTRY; 3601 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP; 3602 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1; 3603 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc); 3604 CSR_WRITE_4(sc, JME_RSSC, rssc); 3605 3606 toeplitz_get_key(key, sizeof(key)); 3607 for (i = 0; i < RSSKEY_NREGS; ++i) { 3608 uint32_t keyreg; 3609 3610 keyreg = RSSKEY_REGVAL(key, i); 3611 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x, reg 0x%08x\n", 3612 i, keyreg, RSSKEY_REG(RSSKEY_NREGS - 1 - i)); 3613 3614 CSR_WRITE_4(sc, RSSKEY_REG(RSSKEY_NREGS - 1 - i), keyreg); 3615 } 3616 3617 /* 3618 * Create redirect table in following fashion: 3619 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 3620 */ 3621 ind = 0; 3622 for (i = 0; i < RSSTBL_REGSIZE; ++i) { 3623 int q; 3624 3625 q = i % sc->jme_cdata.jme_rx_ring_cnt; 3626 ind |= q << (i * 8); 3627 } 3628 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind); 3629 3630 for (i = 0; i < RSSTBL_NREGS; ++i) 3631 CSR_WRITE_4(sc, RSSTBL_REG(i), ind); 3632 } 3633 3634 static void 3635 jme_disable_rss(struct jme_softc *sc) 3636 { 3637 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); 3638 } 3639 3640 static void 3641 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3642 { 3643 struct jme_softc *sc = ifp->if_softc; 3644 3645 ifnet_serialize_array_enter(sc->jme_serialize_arr, 3646 sc->jme_serialize_cnt, slz); 3647 } 3648 3649 static void 3650 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3651 { 3652 struct jme_softc *sc = ifp->if_softc; 3653 3654 ifnet_serialize_array_exit(sc->jme_serialize_arr, 3655 sc->jme_serialize_cnt, slz); 3656 } 3657 3658 static int 3659 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3660 { 3661 struct jme_softc *sc = ifp->if_softc; 3662 3663 return ifnet_serialize_array_try(sc->jme_serialize_arr, 3664 sc->jme_serialize_cnt, slz); 3665 } 3666 3667 #ifdef INVARIANTS 3668 3669 static void 3670 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3671 boolean_t serialized) 3672 { 3673 struct jme_softc *sc = ifp->if_softc; 3674 3675 ifnet_serialize_array_assert(sc->jme_serialize_arr, 3676 sc->jme_serialize_cnt, slz, serialized); 3677 } 3678 3679 #endif /* INVARIANTS */ 3680 3681 static void 3682 jme_msix_try_alloc(device_t dev) 3683 { 3684 struct jme_softc *sc = device_get_softc(dev); 3685 struct jme_msix_data *msix; 3686 int error, i, r, msix_enable, msix_count; 3687 int offset, offset_def; 3688 3689 msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt); 3690 KKASSERT(msix_count <= JME_NMSIX); 3691 3692 msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable); 3693 3694 /* 3695 * We leave the 1st MSI-X vector unused, so we 3696 * actually need msix_count + 1 MSI-X vectors. 3697 */ 3698 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1)) 3699 return; 3700 3701 for (i = 0; i < msix_count; ++i) 3702 sc->jme_msix[i].jme_msix_rid = -1; 3703 3704 i = 0; 3705 3706 /* 3707 * Setup status MSI-X 3708 */ 3709 3710 msix = &sc->jme_msix[i++]; 3711 msix->jme_msix_cpuid = 0; 3712 msix->jme_msix_arg = sc; 3713 msix->jme_msix_func = jme_msix_status; 3714 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 3715 msix->jme_msix_intrs |= 3716 sc->jme_cdata.jme_rx_data[r].jme_rx_empty; 3717 } 3718 msix->jme_msix_serialize = &sc->jme_serialize; 3719 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts", 3720 device_get_nameunit(dev)); 3721 3722 /* 3723 * Setup TX MSI-X 3724 */ 3725 3726 offset_def = device_get_unit(dev) % ncpus2; 3727 offset = device_getenv_int(dev, "msix.txoff", offset_def); 3728 if (offset >= ncpus2) { 3729 device_printf(dev, "invalid msix.txoff %d, use %d\n", 3730 offset, offset_def); 3731 offset = offset_def; 3732 } 3733 3734 msix = &sc->jme_msix[i++]; 3735 msix->jme_msix_cpuid = offset; 3736 sc->jme_tx_cpuid = msix->jme_msix_cpuid; 3737 msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data; 3738 msix->jme_msix_func = jme_msix_tx; 3739 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO; 3740 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize; 3741 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx", 3742 device_get_nameunit(dev)); 3743 3744 /* 3745 * Setup RX MSI-X 3746 */ 3747 3748 if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) { 3749 offset = 0; 3750 } else { 3751 offset_def = (sc->jme_cdata.jme_rx_ring_cnt * 3752 device_get_unit(dev)) % ncpus2; 3753 3754 offset = device_getenv_int(dev, "msix.rxoff", offset_def); 3755 if (offset >= ncpus2 || 3756 offset % sc->jme_cdata.jme_rx_ring_cnt != 0) { 3757 device_printf(dev, "invalid msix.rxoff %d, use %d\n", 3758 offset, offset_def); 3759 offset = offset_def; 3760 } 3761 } 3762 3763 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 3764 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 3765 3766 msix = &sc->jme_msix[i++]; 3767 msix->jme_msix_cpuid = r + offset; 3768 KKASSERT(msix->jme_msix_cpuid < ncpus2); 3769 msix->jme_msix_arg = rdata; 3770 msix->jme_msix_func = jme_msix_rx; 3771 msix->jme_msix_intrs = rdata->jme_rx_coal; 3772 msix->jme_msix_serialize = &rdata->jme_rx_serialize; 3773 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), 3774 "%s rx%d", device_get_nameunit(dev), r); 3775 } 3776 3777 KKASSERT(i == msix_count); 3778 3779 error = pci_setup_msix(dev); 3780 if (error) 3781 return; 3782 3783 /* Setup jme_msix_cnt early, so we could cleanup */ 3784 sc->jme_msix_cnt = msix_count; 3785 3786 for (i = 0; i < msix_count; ++i) { 3787 msix = &sc->jme_msix[i]; 3788 3789 msix->jme_msix_vector = i + 1; 3790 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector, 3791 &msix->jme_msix_rid, msix->jme_msix_cpuid); 3792 if (error) 3793 goto back; 3794 3795 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 3796 &msix->jme_msix_rid, RF_ACTIVE); 3797 if (msix->jme_msix_res == NULL) { 3798 error = ENOMEM; 3799 goto back; 3800 } 3801 } 3802 3803 for (i = 0; i < JME_INTR_CNT; ++i) { 3804 uint32_t intr_mask = (1 << i); 3805 int x; 3806 3807 if ((JME_INTRS & intr_mask) == 0) 3808 continue; 3809 3810 for (x = 0; x < msix_count; ++x) { 3811 msix = &sc->jme_msix[x]; 3812 if (msix->jme_msix_intrs & intr_mask) { 3813 int reg, shift; 3814 3815 reg = i / JME_MSINUM_FACTOR; 3816 KKASSERT(reg < JME_MSINUM_CNT); 3817 3818 shift = (i % JME_MSINUM_FACTOR) * 4; 3819 3820 sc->jme_msinum[reg] |= 3821 (msix->jme_msix_vector << shift); 3822 3823 break; 3824 } 3825 } 3826 } 3827 3828 if (bootverbose) { 3829 for (i = 0; i < JME_MSINUM_CNT; ++i) { 3830 device_printf(dev, "MSINUM%d: %#x\n", i, 3831 sc->jme_msinum[i]); 3832 } 3833 } 3834 3835 pci_enable_msix(dev); 3836 sc->jme_irq_type = PCI_INTR_TYPE_MSIX; 3837 3838 back: 3839 if (error) 3840 jme_msix_free(dev); 3841 } 3842 3843 static int 3844 jme_intr_alloc(device_t dev) 3845 { 3846 struct jme_softc *sc = device_get_softc(dev); 3847 u_int irq_flags; 3848 3849 jme_msix_try_alloc(dev); 3850 3851 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) { 3852 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable, 3853 &sc->jme_irq_rid, &irq_flags); 3854 3855 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 3856 &sc->jme_irq_rid, irq_flags); 3857 if (sc->jme_irq_res == NULL) { 3858 device_printf(dev, "can't allocate irq\n"); 3859 return ENXIO; 3860 } 3861 sc->jme_tx_cpuid = rman_get_cpuid(sc->jme_irq_res); 3862 } 3863 return 0; 3864 } 3865 3866 static void 3867 jme_msix_free(device_t dev) 3868 { 3869 struct jme_softc *sc = device_get_softc(dev); 3870 int i; 3871 3872 KKASSERT(sc->jme_msix_cnt > 1); 3873 3874 for (i = 0; i < sc->jme_msix_cnt; ++i) { 3875 struct jme_msix_data *msix = &sc->jme_msix[i]; 3876 3877 if (msix->jme_msix_res != NULL) { 3878 bus_release_resource(dev, SYS_RES_IRQ, 3879 msix->jme_msix_rid, msix->jme_msix_res); 3880 msix->jme_msix_res = NULL; 3881 } 3882 if (msix->jme_msix_rid >= 0) { 3883 pci_release_msix_vector(dev, msix->jme_msix_rid); 3884 msix->jme_msix_rid = -1; 3885 } 3886 } 3887 pci_teardown_msix(dev); 3888 } 3889 3890 static void 3891 jme_intr_free(device_t dev) 3892 { 3893 struct jme_softc *sc = device_get_softc(dev); 3894 3895 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) { 3896 if (sc->jme_irq_res != NULL) { 3897 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid, 3898 sc->jme_irq_res); 3899 } 3900 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI) 3901 pci_release_msi(dev); 3902 } else { 3903 jme_msix_free(dev); 3904 } 3905 } 3906 3907 static void 3908 jme_msix_tx(void *xtdata) 3909 { 3910 struct jme_txdata *tdata = xtdata; 3911 struct jme_softc *sc = tdata->jme_sc; 3912 struct ifnet *ifp = &sc->arpcom.ac_if; 3913 3914 ASSERT_SERIALIZED(&tdata->jme_tx_serialize); 3915 3916 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO); 3917 3918 CSR_WRITE_4(sc, JME_INTR_STATUS, 3919 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP); 3920 3921 if (ifp->if_flags & IFF_RUNNING) { 3922 jme_txeof(tdata); 3923 if (!ifq_is_empty(&ifp->if_snd)) 3924 if_devstart(ifp); 3925 } 3926 3927 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO); 3928 } 3929 3930 static void 3931 jme_msix_rx(void *xrdata) 3932 { 3933 struct jme_rxdata *rdata = xrdata; 3934 struct jme_softc *sc = rdata->jme_sc; 3935 struct ifnet *ifp = &sc->arpcom.ac_if; 3936 3937 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3938 3939 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal); 3940 3941 CSR_WRITE_4(sc, JME_INTR_STATUS, 3942 rdata->jme_rx_coal | rdata->jme_rx_comp); 3943 3944 if (ifp->if_flags & IFF_RUNNING) 3945 jme_rxeof(rdata, -1, mycpuid); 3946 3947 CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal); 3948 } 3949 3950 static void 3951 jme_msix_status(void *xsc) 3952 { 3953 struct jme_softc *sc = xsc; 3954 struct ifnet *ifp = &sc->arpcom.ac_if; 3955 uint32_t status; 3956 3957 ASSERT_SERIALIZED(&sc->jme_serialize); 3958 3959 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY); 3960 3961 status = CSR_READ_4(sc, JME_INTR_STATUS); 3962 3963 if (status & INTR_RXQ_DESC_EMPTY) { 3964 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY); 3965 if (ifp->if_flags & IFF_RUNNING) 3966 jme_rx_restart(sc, status); 3967 } 3968 3969 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY); 3970 } 3971 3972 static void 3973 jme_rx_restart(struct jme_softc *sc, uint32_t status) 3974 { 3975 int i, cpuid = mycpuid; 3976 3977 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 3978 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i]; 3979 3980 if (status & rdata->jme_rx_empty) { 3981 lwkt_serialize_enter(&rdata->jme_rx_serialize); 3982 jme_rxeof(rdata, -1, cpuid); 3983 #ifdef JME_RSS_DEBUG 3984 rdata->jme_rx_emp++; 3985 #endif 3986 lwkt_serialize_exit(&rdata->jme_rx_serialize); 3987 } 3988 } 3989 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB | 3990 RXCSR_RXQ_START); 3991 } 3992 3993 static void 3994 jme_set_msinum(struct jme_softc *sc) 3995 { 3996 int i; 3997 3998 for (i = 0; i < JME_MSINUM_CNT; ++i) 3999 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]); 4000 } 4001 4002 static int 4003 jme_intr_setup(device_t dev) 4004 { 4005 struct jme_softc *sc = device_get_softc(dev); 4006 int error; 4007 4008 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) 4009 return jme_msix_setup(dev); 4010 4011 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, 4012 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize); 4013 if (error) { 4014 device_printf(dev, "could not set up interrupt handler.\n"); 4015 return error; 4016 } 4017 4018 return 0; 4019 } 4020 4021 static void 4022 jme_intr_teardown(device_t dev) 4023 { 4024 struct jme_softc *sc = device_get_softc(dev); 4025 4026 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) 4027 jme_msix_teardown(dev, sc->jme_msix_cnt); 4028 else 4029 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle); 4030 } 4031 4032 static int 4033 jme_msix_setup(device_t dev) 4034 { 4035 struct jme_softc *sc = device_get_softc(dev); 4036 int x; 4037 4038 for (x = 0; x < sc->jme_msix_cnt; ++x) { 4039 struct jme_msix_data *msix = &sc->jme_msix[x]; 4040 int error; 4041 4042 error = bus_setup_intr_descr(dev, msix->jme_msix_res, 4043 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg, 4044 &msix->jme_msix_handle, msix->jme_msix_serialize, 4045 msix->jme_msix_desc); 4046 if (error) { 4047 device_printf(dev, "could not set up %s " 4048 "interrupt handler.\n", msix->jme_msix_desc); 4049 jme_msix_teardown(dev, x); 4050 return error; 4051 } 4052 } 4053 return 0; 4054 } 4055 4056 static void 4057 jme_msix_teardown(device_t dev, int msix_count) 4058 { 4059 struct jme_softc *sc = device_get_softc(dev); 4060 int x; 4061 4062 for (x = 0; x < msix_count; ++x) { 4063 struct jme_msix_data *msix = &sc->jme_msix[x]; 4064 4065 bus_teardown_intr(dev, msix->jme_msix_res, 4066 msix->jme_msix_handle); 4067 } 4068 } 4069 4070 static void 4071 jme_serialize_skipmain(struct jme_softc *sc) 4072 { 4073 lwkt_serialize_array_enter(sc->jme_serialize_arr, 4074 sc->jme_serialize_cnt, 1); 4075 } 4076 4077 static void 4078 jme_deserialize_skipmain(struct jme_softc *sc) 4079 { 4080 lwkt_serialize_array_exit(sc->jme_serialize_arr, 4081 sc->jme_serialize_cnt, 1); 4082 } 4083 4084 static void 4085 jme_enable_intr(struct jme_softc *sc) 4086 { 4087 int i; 4088 4089 for (i = 0; i < sc->jme_serialize_cnt; ++i) 4090 lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]); 4091 4092 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 4093 } 4094 4095 static void 4096 jme_disable_intr(struct jme_softc *sc) 4097 { 4098 int i; 4099 4100 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 4101 4102 for (i = 0; i < sc->jme_serialize_cnt; ++i) 4103 lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]); 4104 } 4105 4106 static void 4107 jme_phy_poweron(struct jme_softc *sc) 4108 { 4109 uint16_t bmcr; 4110 4111 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR); 4112 bmcr &= ~BMCR_PDOWN; 4113 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr); 4114 4115 if (sc->jme_caps & JME_CAP_PHYPWR) { 4116 uint32_t val; 4117 4118 val = CSR_READ_4(sc, JME_PHYPWR); 4119 val &= ~(PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW | 4120 PHYPWR_DOWN2 | PHYPWR_CLKSEL); 4121 CSR_WRITE_4(sc, JME_PHYPWR, val); 4122 4123 val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4); 4124 val &= ~PE1_GPREG0_PHYBG; 4125 val |= PE1_GPREG0_ENBG; 4126 pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4); 4127 } 4128 } 4129 4130 static void 4131 jme_phy_poweroff(struct jme_softc *sc) 4132 { 4133 uint16_t bmcr; 4134 4135 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR); 4136 bmcr |= BMCR_PDOWN; 4137 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr); 4138 4139 if (sc->jme_caps & JME_CAP_PHYPWR) { 4140 uint32_t val; 4141 4142 val = CSR_READ_4(sc, JME_PHYPWR); 4143 val |= PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW | 4144 PHYPWR_DOWN2 | PHYPWR_CLKSEL; 4145 CSR_WRITE_4(sc, JME_PHYPWR, val); 4146 4147 val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4); 4148 val &= ~PE1_GPREG0_PHYBG; 4149 val |= PE1_GPREG0_PDD3COLD; 4150 pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4); 4151 } 4152 } 4153 4154 static int 4155 jme_miiext_read(struct jme_softc *sc, int reg) 4156 { 4157 int addr; 4158 4159 addr = JME_MII_EXT_ADDR_RD | reg; 4160 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 4161 JME_MII_EXT_ADDR, addr); 4162 return jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, 4163 JME_MII_EXT_DATA); 4164 } 4165 4166 static void 4167 jme_miiext_write(struct jme_softc *sc, int reg, int val) 4168 { 4169 int addr; 4170 4171 addr = JME_MII_EXT_ADDR_WR | reg; 4172 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 4173 JME_MII_EXT_DATA, val); 4174 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 4175 JME_MII_EXT_ADDR, addr); 4176 } 4177 4178 static void 4179 jme_phy_init(struct jme_softc *sc) 4180 { 4181 uint16_t gtcr; 4182 int val; 4183 4184 jme_phy_poweroff(sc); 4185 jme_phy_poweron(sc); 4186 4187 /* Enable PHY test 1 */ 4188 gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR); 4189 gtcr &= ~GTCR_TEST_MASK; 4190 gtcr |= GTCR_TEST_1; 4191 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr); 4192 4193 val = jme_miiext_read(sc, JME_MII_EXT_COM2); 4194 val &= ~JME_MII_EXT_COM2_CALIB_MODE0; 4195 val |= JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN; 4196 jme_miiext_write(sc, JME_MII_EXT_COM2, val); 4197 4198 DELAY(20000); 4199 4200 val = jme_miiext_read(sc, JME_MII_EXT_COM2); 4201 val &= ~(JME_MII_EXT_COM2_CALIB_MODE0 | 4202 JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN); 4203 jme_miiext_write(sc, JME_MII_EXT_COM2, val); 4204 4205 /* Disable PHY test */ 4206 gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR); 4207 gtcr &= ~GTCR_TEST_MASK; 4208 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr); 4209 4210 if (sc->jme_phycom0 != 0) 4211 jme_miiext_write(sc, JME_MII_EXT_COM0, sc->jme_phycom0); 4212 if (sc->jme_phycom1 != 0) 4213 jme_miiext_write(sc, JME_MII_EXT_COM1, sc->jme_phycom1); 4214 } 4215