/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */

#include "opt_ifpoll.h"
#include "opt_jme.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"

#define JME_TICK_CPUID		0	/* DO NOT CHANGE THIS */

#define JME_TX_SERIALIZE	1
#define JME_RX_SERIALIZE	2

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
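/*
 * NOTE: JME_TX_SERIALIZE and JME_RX_SERIALIZE above are indices into
 * jme_serialize_arr[]: slot 0 holds the main serializer, slot 1 the TX
 * serializer, and slots 2 and up the per-ring RX serializers.
 * jme_attach() asserts exactly this layout with KKASSERT.
 */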
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *, struct ifaltq_subque *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef IFPOLL_ENABLE
static void	jme_npoll(struct ifnet *, struct ifpoll_info *);
static void	jme_npoll_status(struct ifnet *);
static void	jme_npoll_rx(struct ifnet *, void *, int);
static void	jme_npoll_tx(struct ifnet *, void *, int);
#endif
static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_msix_status(void *);
static void	jme_txeof(struct jme_txdata *);
static void	jme_rxeof(struct jme_rxdata *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);
static void	jme_enable_intr(struct jme_softc *);
static void	jme_disable_intr(struct jme_softc *);
static void	jme_rx_restart(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_rxdata *);
static void	jme_init_tx_ring(struct jme_txdata *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_txdata *, struct mbuf **, int *);
static void	jme_rxpkt(struct jme_rxdata *);
static int	jme_rxring_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_filter(void *, bus_addr_t);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
#endif
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);
static void	jme_serialize_skipmain(struct jme_softc *);
static void	jme_deserialize_skipmain(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};
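/*
 * NOTE: jme_probe() walks jme_devs[] by vendor/device ID and, on a
 * match, copies the table's jme_caps into the softc, so per-chip
 * capabilities (jumbo frames vs. fast ethernet only) are fixed here.
 */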
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);

static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};

static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 0;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);

static __inline void
jme_setup_rxdesc(struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;

	desc = rxd->rx_desc;
	desc->buflen = htole32(MCLBYTES);
	desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
	desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
}

/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
		    "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
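/*
 * NOTE: the SMI access pattern above is shared by the write path
 * below: queue the operation in JME_SMI, then busy-wait in 1us steps
 * (up to JME_PHY_TIMEOUT) for the chip to clear SMI_OP_EXECUTE.  A
 * timeout is only logged; it is not treated as a fatal error.
 */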
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
		    "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}

/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	if (sc->jme_in_tick)
		jme_serialize_skipmain(sc);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_has_link = FALSE;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_has_link = TRUE;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_has_link = TRUE;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings.  So the driver should reset its
	 * internal producer/consumer pointers and reclaim any allocated
	 * resources.  Note, just saving the values of the JME_TXNDA and
	 * JME_RXNDA registers before stopping the MAC and restoring
	 * them afterwards is not sufficient to guarantee a correct MAC
	 * state, because stopping the MAC can take a while and the
	 * hardware might have updated JME_TXNDA/JME_RXNDA during the
	 * stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	jme_txeof(tdata);
	if (tdata->jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(tdata->jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(tdata);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);
	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_has_link) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = tdata->jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
	    JME_TICK_CPUID);

#ifdef IFPOLL_ENABLE
	if (!(ifp->if_flags & IFF_NPOLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

done:
	if (sc->jme_in_tick)
		jme_deserialize_skipmain(sc);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
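/*
 * NOTE: jme_eeprom_macaddr() below walks the EEPROM descriptors that
 * follow the two signature bytes.  Each descriptor is a (fup, reg, val)
 * triple of JME_EEPROM_DESC_BYTES: when the fup byte matches function
 * 0/page BAR1 and reg addresses one of the six JME_PAR0 station address
 * bytes, val supplies that byte of the MAC address.  The walk stops at
 * the end-of-descriptor marker or the end of the EEPROM.
 */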
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
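/*
 * NOTE: on any failure past resource allocation, jme_attach() below
 * bails out through jme_detach(), which tolerates a partially
 * initialized softc; no per-step unwinding is needed in the attach
 * path itself.
 */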
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, rx_desc_cnt, coal_max;
	uint8_t eaddr[ETHER_ADDR_LEN];
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	/*
	 * Get # of RX ring descriptors
	 */
	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
	    jme_rx_desc_count);
	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
	if (rx_desc_cnt > JME_NDESC_MAX)
		rx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Get # of TX ring descriptors
	 */
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count);
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
	    JME_NDESC_ALIGN);
	if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Get # of RX rings
	 */
	sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
	    jme_rx_ring_count);
	sc->jme_cdata.jme_rx_ring_cnt =
	    if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);

	/*
	 * Initialize serializer array
	 */
	i = 0;
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;

	KKASSERT(i == JME_TX_SERIALIZE);
	sc->jme_serialize_arr[i++] =
	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;

	KKASSERT(i == JME_RX_SERIALIZE);
	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}
	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;

	/*
	 * Setup TX ring specific data
	 */
	sc->jme_cdata.jme_tx_data.jme_sc = sc;

	/*
	 * Setup RX rings specific data
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
		rdata->jme_rx_desc_cnt = rx_desc_cnt;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */
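	/*
	 * NOTE: the D0 transition above saves and restores PCIR_INTLINE
	 * and the memory BAR because moving a device out of a low power
	 * state can clear those configuration registers on some
	 * hardware.
	 */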
	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers, it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && bootverbose) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has a fixed PHY address whereas the FPGA
	 * version requires PHY probing to get the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}
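	/*
	 * NOTE: the switch below derives the TX DMA burst size from the
	 * PCIe Maximum Read Request Size: 128 and 256 bytes map
	 * directly, anything larger is clamped to a 512 byte burst.
	 * The RX side always uses a 128 byte DMA size.
	 */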
	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
		    device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->jme_npoll_rxoff = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	offset_def = sc->jme_npoll_rxoff;
	offset = device_getenv_int(dev, "npoll.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid npoll.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}
	sc->jme_npoll_txoff = offset;
#endif
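	/*
	 * NOTE: npoll.rxoff validated above must be a multiple of the
	 * RX ring count, presumably so each RX ring polls on its own
	 * CPU starting at that offset; npoll.txoff selects the CPU that
	 * polls the TX ring (see jme_npoll()).
	 */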
	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descriptors is set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of
	 * descriptors must be aligned to JME_NDESC_ALIGN (16 currently).
	 */
	coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;

	sc->jme_cdata.jme_tx_data.jme_tx_wreg = 16;

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = jme_npoll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd,
	    sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_TSO |
	    IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING;
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= JME_CSUM_FEATURES;
	ifp->if_hwassist |= CSUM_TSO;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}
	ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);

	return 0;
fail:
	jme_detach(dev);
	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}

static void
jme_sysctl_node(struct jme_softc *sc)
{
#ifdef JME_RSS_DEBUG
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_ring_cnt,
	    0, "RX ring count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_wreg", CTLFLAG_RW,
	    &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0,
	    "# of segments before writing to hardware register");

#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_desc[32];
		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_emp", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
		    "# of times RX ring was empty");
	}
#endif

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	asize = __VM_CACHELINE_ALIGN(
	    tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc));
	tdata->jme_txdesc = kmalloc_cachealign(asize, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		asize = __VM_CACHELINE_ALIGN(
		    rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc));
		rdata->jme_rxdesc = kmalloc_cachealign(asize, M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	tdata->jme_tx_ring_tag = dmem.dmem_tag;
	tdata->jme_tx_ring_map = dmem.dmem_map;
	tdata->jme_tx_ring = dmem.dmem_addr;
	tdata->jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
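	/*
	 * NOTE: two parent DMA tags are used.  The ring tag above
	 * carries the JME_RING_BOUNDARY restriction so descriptor rings
	 * never cross the chip's addressing boundary; the buffer tag
	 * below has no boundary restriction and parents the TX buffer
	 * and shadow status block tags.
	 */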
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &tdata->jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}
	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		error = bus_dmamap_create(tdata->jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &tdata->jme_txdesc[j];
				bus_dmamap_destroy(tdata->jme_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(tdata->jme_tx_tag);
			tdata->jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (tdata->jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring_map);
		bus_dmamem_free(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring, tdata->jme_tx_ring_map);
		bus_dma_tag_destroy(tdata->jme_tx_ring_tag);
		tdata->jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (tdata->jme_tx_tag != NULL) {
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap);
		}
		bus_dma_tag_destroy(tdata->jme_tx_tag);
		tdata->jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}
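	/*
	 * NOTE: teardown mirrors jme_dma_alloc() in reverse: rings and
	 * buffer maps go first, then the shadow status block below, and
	 * the parent tags and descriptor arrays last.
	 */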
	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (tdata->jme_txdesc != NULL) {
		kfree(tdata->jme_txdesc, M_DEVBUF);
		tdata->jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}
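/*
 * NOTE: the two routines below are compiled out (notyet): they
 * implement the WOL path, which first renegotiates the link down to
 * 10/100Mbps (jme_setlinkspeed) and then arms the PME/magic frame
 * machinery (jme_setwol).
 */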
#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not, as we
 * have no control after powering off.  If the renegotiation fails, WOL
 * may not work.  Running at 1Gbps draws more power than the 375mA at
 * 3.3V specified by the PCI specification, and that could result in
 * power to the ethernet controller being shut down completely.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * the suspend method of the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force the MAC to a 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif
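/*
 * NOTE: jme_shutdown() funnels into jme_suspend() below, so reboot and
 * suspend share the same stop path; once the WOL code above is
 * enabled, it would also arm wake-up here.
 */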
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	ifnet_deserialize_all(ifp);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	ifnet_serialize_all(ifp);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME, clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}

static __inline int
jme_tso_pullup(struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	return 0;
}
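/*
 * NOTE: jme_encap() below budgets descriptors as
 * free = tx_desc_cnt - tx_cnt - (JME_TXD_RSVD + symbol_desc), where the
 * "symbol" descriptor only exists for 64-bit DMA: in that chain format
 * the first descriptor carries no payload, just flags and the total
 * frame length.
 */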
static int
jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = tdata->jme_tx_prod;
	txd = &tdata->jme_txdesc[prod];

	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
	    ("not enough segments %d", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;
	*segs_used += nsegs;

	bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= JME_TD_IPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= JME_TD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= JME_TD_UDPCSUM;
	}

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &tdata->jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = htole32(mss);
		desc->addr_lo = 0;

		*segs_used += 1;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(mss | txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	tdata->jme_tx_cnt++;
	KKASSERT(tdata->jme_tx_cnt - i <
	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &tdata->jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		desc->flags = htole32(JME_TD_OWN | flag64);

		tdata->jme_tx_cnt++;
		KKASSERT(tdata->jme_tx_cnt <=
		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	tdata->jme_tx_prod = prod;
	/*
	 * Finally, request an interrupt and give ownership of the
	 * first descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
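/*
 * NOTE: jme_start() below batches doorbell writes: the TXCSR kick is
 * only issued once jme_tx_wreg segments have been queued, and once
 * more at the end for any remainder, trading a little latency for
 * fewer register accesses under load.
 */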
1870 */ 1871 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB | 1872 TXCSR_TXQ_N_START(TXCSR_TXQ0)); 1873 } 1874 } 1875 1876 static void 1877 jme_watchdog(struct ifnet *ifp) 1878 { 1879 struct jme_softc *sc = ifp->if_softc; 1880 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 1881 1882 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1883 1884 if (!sc->jme_has_link) { 1885 if_printf(ifp, "watchdog timeout (missed link)\n"); 1886 ifp->if_oerrors++; 1887 jme_init(sc); 1888 return; 1889 } 1890 1891 jme_txeof(tdata); 1892 if (tdata->jme_tx_cnt == 0) { 1893 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 1894 "-- recovering\n"); 1895 if (!ifq_is_empty(&ifp->if_snd)) 1896 if_devstart(ifp); 1897 return; 1898 } 1899 1900 if_printf(ifp, "watchdog timeout\n"); 1901 ifp->if_oerrors++; 1902 jme_init(sc); 1903 if (!ifq_is_empty(&ifp->if_snd)) 1904 if_devstart(ifp); 1905 } 1906 1907 static int 1908 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 1909 { 1910 struct jme_softc *sc = ifp->if_softc; 1911 struct mii_data *mii = device_get_softc(sc->jme_miibus); 1912 struct ifreq *ifr = (struct ifreq *)data; 1913 int error = 0, mask; 1914 1915 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1916 1917 switch (cmd) { 1918 case SIOCSIFMTU: 1919 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU || 1920 (!(sc->jme_caps & JME_CAP_JUMBO) && 1921 ifr->ifr_mtu > JME_MAX_MTU)) { 1922 error = EINVAL; 1923 break; 1924 } 1925 1926 if (ifp->if_mtu != ifr->ifr_mtu) { 1927 /* 1928 * No special configuration is required when interface 1929 * MTU is changed but availability of Tx checksum 1930 * offload should be chcked against new MTU size as 1931 * FIFO size is just 2K. 1932 */ 1933 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) { 1934 ifp->if_capenable &= 1935 ~(IFCAP_TXCSUM | IFCAP_TSO); 1936 ifp->if_hwassist &= 1937 ~(JME_CSUM_FEATURES | CSUM_TSO); 1938 } 1939 ifp->if_mtu = ifr->ifr_mtu; 1940 if (ifp->if_flags & IFF_RUNNING) 1941 jme_init(sc); 1942 } 1943 break; 1944 1945 case SIOCSIFFLAGS: 1946 if (ifp->if_flags & IFF_UP) { 1947 if (ifp->if_flags & IFF_RUNNING) { 1948 if ((ifp->if_flags ^ sc->jme_if_flags) & 1949 (IFF_PROMISC | IFF_ALLMULTI)) 1950 jme_set_filter(sc); 1951 } else { 1952 jme_init(sc); 1953 } 1954 } else { 1955 if (ifp->if_flags & IFF_RUNNING) 1956 jme_stop(sc); 1957 } 1958 sc->jme_if_flags = ifp->if_flags; 1959 break; 1960 1961 case SIOCADDMULTI: 1962 case SIOCDELMULTI: 1963 if (ifp->if_flags & IFF_RUNNING) 1964 jme_set_filter(sc); 1965 break; 1966 1967 case SIOCSIFMEDIA: 1968 case SIOCGIFMEDIA: 1969 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1970 break; 1971 1972 case SIOCSIFCAP: 1973 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1974 1975 if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) { 1976 ifp->if_capenable ^= IFCAP_TXCSUM; 1977 if (ifp->if_capenable & IFCAP_TXCSUM) 1978 ifp->if_hwassist |= JME_CSUM_FEATURES; 1979 else 1980 ifp->if_hwassist &= ~JME_CSUM_FEATURES; 1981 } 1982 if (mask & IFCAP_RXCSUM) { 1983 uint32_t reg; 1984 1985 ifp->if_capenable ^= IFCAP_RXCSUM; 1986 reg = CSR_READ_4(sc, JME_RXMAC); 1987 reg &= ~RXMAC_CSUM_ENB; 1988 if (ifp->if_capenable & IFCAP_RXCSUM) 1989 reg |= RXMAC_CSUM_ENB; 1990 CSR_WRITE_4(sc, JME_RXMAC, reg); 1991 } 1992 1993 if (mask & IFCAP_VLAN_HWTAGGING) { 1994 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1995 jme_set_vlan(sc); 1996 } 1997 1998 if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) { 1999 ifp->if_capenable ^= IFCAP_TSO; 2000 if (ifp->if_capenable & IFCAP_TSO) 2001 ifp->if_hwassist |= CSUM_TSO; 2002 else 2003 
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with the resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use the extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B.
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/*
	 * Disable interrupts.
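	 *
	 * The handler follows the usual mask -> ack -> handle -> unmask
	 * sequence; events that fire while the mask is cleared still set
	 * bits in JME_INTR_STATUS and are picked up once the mask is
	 * restored at the "back" label below.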
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset the PCC counter/timer and ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;

		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify the hardware of the availability of new
			 * Rx buffers.  Reading RXCSR takes a very long
			 * time under heavy load, so cache the RXCSR value
			 * and write the ORed value with the kick command
			 * to RXCSR.  This saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			lwkt_serialize_enter(&tdata->jme_tx_serialize);
			jme_txeof(tdata);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&tdata->jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

static void
jme_txeof(struct jme_txdata *tdata)
{
	struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if;
	int cons;

	cons = tdata->jme_tx_cons;
	if (cons == tdata->jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != tdata->jme_tx_prod) {
		struct jme_txdesc *txd, *next_txd;
		uint32_t status, next_status;
		int next_cons, nsegs;

		txd = &tdata->jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		/*
		 * NOTE:
		 * This chip always updates the TX descriptor's buflen
		 * field, and this update always happens after the OWN
		 * bit is cleared.  So even when the OWN bit reads as
		 * cleared, we still cannot be sure whether the buflen
		 * field has been updated by the chip yet.  To avoid
		 * this race, we wait for the next TX descriptor's OWN
		 * bit to be cleared by the chip before reusing this TX
		 * descriptor.
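		 *
		 * Concretely: a cleared OWN bit only proves that the
		 * chip has started writing this descriptor back; the
		 * buflen update may still be in flight.  Once the next
		 * chain's first descriptor also has its OWN bit cleared,
		 * the chip has moved past this one, so the buflen read
		 * below (used for the collision count) is stable.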
		 */
		next_cons = cons;
		JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt);
		next_txd = &tdata->jme_txdesc[next_cons];
		if (next_txd->tx_m == NULL)
			break;
		next_status = le32toh(next_txd->tx_desc->flags);
		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only at the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			tdata->jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, tdata->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		tdata->jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(tdata->jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	tdata->jme_tx_cons = cons;

	/* 1 for the symbol TX descriptor */
	if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1)
		ifp->if_timer = 0;

	if (tdata->jme_tx_cnt + JME_TXD_SPARE <=
	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifq_clr_oactive(&ifp->if_snd);
}

static __inline void
jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
	}
}

static __inline struct pktinfo *
jme_pktinfo(struct pktinfo *pi, uint32_t flags)
{
	if (flags & JME_RD_IPV4)
		pi->pi_netisr = NETISR_IP;
	else if (flags & JME_RD_IPV6)
		pi->pi_netisr = NETISR_IPV6;
	else
		return NULL;

	pi->pi_flags = 0;
	pi->pi_l3proto = IPPROTO_UNKNOWN;

	if (flags & JME_RD_MORE_FRAG)
		pi->pi_flags |= PKTINFO_FLAG_FRAG;
	else if (flags & JME_RD_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else if (flags & JME_RD_UDP)
		pi->pi_l3proto = IPPROTO_UDP;
	else
		pi = NULL;
	return pi;
}

/* Receive a frame. */
static void
jme_rxpkt(struct jme_rxdata *rdata)
{
	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];

	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);

	if (nsegs > 1) {
		/* Skip the first descriptor. */
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);

		/*
		 * Clear the OWN bit of the following RX descriptors;
		 * the hardware will not clear the OWN bit of any
		 * descriptor except the first RX descriptor.
		 *
		 * Since the first RX descriptor is set up, i.e. its
		 * OWN bit is turned on, before its following RX
		 * descriptors, leaving the OWN bit on in the following
		 * RX descriptors would trick the hardware into thinking
		 * that they are ready to be used too.
		 */
		for (count = 1; count < nsegs; count++,
		     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
			rdata->jme_rx_ring[cons].flags = 0;

		cons = rdata->jme_rx_cons;
	}

	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
	    "hash 0x%08x, hash info 0x%08x\n",
	    rdata->jme_rx_idx, flags, hash, hashinfo);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(rdata, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		if_printf(ifp, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
		return;
	}

	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(rdata, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse the buffer. */
			jme_discard_rxbufs(rdata, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(rdata);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * The receive processor can receive a maximum
			 * frame size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set the first mbuf's size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set the last mbuf's size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				     (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10-byte auto padding which is
			 * used to align the IP header on a 32-bit
			 * boundary.  Also note that the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/*
			 * Check for VLAN tagged packets.
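			 *
			 * The tag comes straight from the RX descriptor
			 * flags, so no software parsing of the 802.1Q
			 * header is needed here.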
			 */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;

			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) ==
			    JME_RD_HASH_FN_TOEPLITZ) {
				m->m_flags |= (M_HASH | M_CKHASH);
				m->m_pkthdr.hash = toeplitz_hash(hash);
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(rdata->jme_sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			/* Pass it on. */
			ether_input_pkt(ifp, m, pi);

			/* Reset the mbuf chain. */
			JME_RXCHAIN_RESET(rdata);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_pkt++;
#endif
		}
	}

	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
}

static void
jme_rxeof(struct jme_rxdata *rdata, int count)
{
	struct jme_desc *desc;
	int nsegs, pktlen;

	for (;;) {
#ifdef IFPOLL_ENABLE
		if (count >= 0 && count-- == 0)
			break;
#endif
		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check the number of segments against the received
		 * bytes.  A non-matching value would indicate that the
		 * hardware is still trying to update the Rx descriptors.
		 * I'm not sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "RX fragment count(%d) and "
			    "packet size(%d) mismatch\n", nsegs, pktlen);
			break;
		}

		/*
		 * NOTE:
		 * The RSS hash and hash information may _not_ have been
		 * set by the hardware even if the OWN bit is cleared and
		 * the VALID bit is set.
		 *
		 * If the RSS information has not been delivered by the
		 * hardware yet, we MUST NOT accept this packet, let
		 * alone reuse its RX descriptor.  If this packet were
		 * accepted and its RX descriptor reused before the
		 * hardware delivered the RSS information, the RX
		 * buffer's address would be trashed by the RSS
		 * information delivered by the hardware.
		 */
		if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
			struct jme_rxdesc *rxd;
			uint32_t hashinfo;

			hashinfo = le32toh(desc->addr_lo);
			rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];

			/*
			 * This test should be enough to detect the
			 * pending delivery of RSS information, given:
			 * - If the RSS hash is not calculated, hashinfo
			 *   will be 0.  However, the lower 32 bits of an
			 *   RX buffer's physical address will never be 0.
			 *   (see jme_rxbuf_dma_filter)
			 * - If the RSS hash is calculated, the lowest 4
			 *   bits of hashinfo will be set, while the RX
			 *   buffers are at least 2K aligned.
			 */
			if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
#ifdef JME_SHOW_RSSWB
				if_printf(&rdata->jme_sc->arpcom.ac_if,
				    "RSS is not written back yet\n");
#endif
				break;
			}
		}

		/*
		 * Received a frame.
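		 *
		 * All of the checks above have passed; jme_rxpkt() will
		 * consume the whole descriptor chain for this frame and
		 * advance jme_rx_cons.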
		 */
		jme_rxpkt(rdata);
	}
}

static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	lwkt_serialize_enter(&sc->jme_serialize);

	KKASSERT(mycpuid == JME_TICK_CPUID);

	sc->jme_in_tick = TRUE;
	mii_tick(mii);
	sc->jme_in_tick = FALSE;

	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	lwkt_serialize_exit(&sc->jme_serialize);
}

static void
jme_reset(struct jme_softc *sc)
{
	uint32_t val;

	/* Make sure that TX and RX are stopped. */
	jme_stop_tx(sc);
	jme_stop_rx(sc);

	/* Start reset. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(20);

	/*
	 * Hold the reset bit before stopping the reset.
	 */

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop reset */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/*
	 * Clear the reset bit after stopping the reset.
	 */

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Enable TX and RX */
	val = CSR_READ_4(sc, JME_TXCSR);
	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
	val = CSR_READ_4(sc, JME_RXCSR);
	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
	/* Flush */
	CSR_READ_4(sc, JME_TXCSR);
	CSR_READ_4(sc, JME_RXCSR);

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop TX and RX */
	jme_stop_tx(sc);
	jme_stop_rx(sc);
}

static void
jme_init(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
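	 *
	 * The reset sequence in jme_reset() above deliberately toggles
	 * the TXMAC/TXOFL and RXMAC clock sources while the reset bit
	 * is held and released; see the comments in that function.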
	 */
	jme_reset(sc);

	/*
	 * Set up the mapping of MSI/MSI-X vectors to interrupt sources.
	 */
	jme_set_msinum(sc);

	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	/* Init RX descriptors */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
		if (error) {
			if_printf(ifp, "initialization failed: "
				  "no memory for %dth RX ring.\n", r);
			jme_stop(sc);
			return;
		}
	}

	/* Init TX descriptors */
	jme_init_tx_ring(&sc->jme_cdata.jme_tx_data);

	/* Initialize the shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 * Tx priority queue weight value : 0
	 * Tx FIFO threshold for processing next packet : 16QW
	 * Maximum Tx DMA length : 512
	 * Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set the Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt);

	/* Set the Tx ring address to the hardware. */
	paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr;
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 * FIFO full threshold for transmitting Tx pause packet : 128T
	 * FIFO threshold for processing next packet : 128QW
	 * Rx queue 0 select
	 * Max Rx DMA length : 128
	 * Rx descriptor retry : 32
	 * Rx descriptor retry time gap : 256ns
	 * Don't receive runt/bad frames.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
#if 0
	/*
	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns.  So
	 * decrease the FIFO threshold to reduce the FIFO overruns for
	 * frames larger than 4000 bytes.
	 * For the best performance of standard MTU sized frames, use
	 * the maximum allowable FIFO threshold, 128QW.
	 */
	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
	    JME_RX_FIFO_SIZE)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
#else
	/* Improve PCI Express compatibility */
	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
#endif
	sc->jme_rxcsr |= sc->jme_rx_dma_size;
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

		/*
		 * Set the Rx descriptor counter.
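		 *
		 * The RXCSR write above selected RX queue 'r'
		 * (RXCSR_RXQ_N_SEL), so this write and the ring address
		 * writes below configure the selected queue only.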
		 */
		CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);

		/* Set the Rx ring address to the hardware. */
		paddr = rdata->jme_rx_ring_paddr;
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
	}

	/* Clear the receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_set_filter(sc);
	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.  Also clear the WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	/*
	 * Pad 10 bytes right before the received frame.  This greatly
	 * helps Rx performance on strict-alignment architectures, as
	 * the frame does not need to be copied to align the payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);

	/* Configure general purpose reg0. */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set the PCC timer resolution to microseconds. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting, as we have to read the
	 * JME_INTR_STATUS register in jme_intr.  Also it seems that
	 * it's hard to synchronize the interrupt status between the
	 * hardware and the software with shadow posting due to the
	 * requirements of bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	       GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	       GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	       GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	jme_set_tx_coal(sc);

	/* Configure Rx queues packet completion coalescing. */
	jme_set_rx_coal(sc);

	/* Configure the shadow status block but don't enable posting. */
	paddr = sc->jme_cdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure the retry transmit period and retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	     TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	     TXTRHD_RT_LIMIT_MASK));

#ifdef IFPOLL_ENABLE
	if (!(ifp->if_flags & IFF_NPOLLING))
#endif
	/* Initialize the interrupt mask. */
	jme_enable_intr(sc);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
	sc->jme_has_link = FALSE;

	/*
	 * Set the current media.
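	 *
	 * mii_mediachg() kicks off (re)negotiation; once a valid link
	 * is reported, jme_miibus_statchg() enables the Tx/Rx DMA
	 * engines as noted above.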
	 */
	mii = device_get_softc(sc->jme_miibus);
	mii_mediachg(mii);

	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
	    JME_TICK_CPUID);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_has_link = FALSE;

	/*
	 * Disable interrupts.
	 */
	jme_disable_intr(sc);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating of the shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop the receiver and transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partially finished RX segments.
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				bus_dmamap_unload(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

static void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
}

static void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
}

static void
jme_init_tx_ring(struct jme_txdata *tdata)
{
	struct jme_txdesc *txd;
	int i;

	tdata->jme_tx_prod = 0;
	tdata->jme_tx_cons = 0;
	tdata->jme_tx_cnt = 0;

	bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata));
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &tdata->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}
}

static void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_chain_data *cd;

	cd = &sc->jme_cdata;
	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
}

static int
jme_init_rx_ring(struct jme_rxdata *rdata)
{
	struct jme_rxdesc *rxd;
	int i;

	KKASSERT(rdata->jme_rxhead == NULL &&
		 rdata->jme_rxtail == NULL &&
		 rdata->jme_rxlen == 0);
	rdata->jme_rx_cons = 0;

	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		int error;

		rxd = &rdata->jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rdata->jme_rx_ring[i];
		error = jme_newbuf(rdata, rxd, 1);
		if (error)
			return error;
	}
	return 0;
}

static int
jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10-byte padding
	 * feature in order not to copy the entire frame to align the
	 * IP header on a 32-bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
	    rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "can't load RX mbuf\n");
		}
		return error;
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;
	rxd->rx_paddr = segs.ds_addr;

	jme_setup_rxdesc(rxd);
	return 0;
}

static void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

static void
jme_set_filter(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
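	 *
	 * Other multicast frames are matched against the 64-bit hash
	 * table programmed below; as a worked example, a destination
	 * address whose ether_crc32_be() value has low-order 6 bits
	 * 0x2a (42) would map to mchash[1] (42 >> 5 == 1), bit 10
	 * (42 & 0x1f).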
3132 */ 3133 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST; 3134 3135 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 3136 if (ifp->if_flags & IFF_PROMISC) 3137 rxcfg |= RXMAC_PROMISC; 3138 if (ifp->if_flags & IFF_ALLMULTI) 3139 rxcfg |= RXMAC_ALLMULTI; 3140 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); 3141 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); 3142 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3143 return; 3144 } 3145 3146 /* 3147 * Set up the multicast address filter by passing all multicast 3148 * addresses through a CRC generator, and then using the low-order 3149 * 6 bits as an index into the 64 bit multicast hash table. The 3150 * high order bits select the register, while the rest of the bits 3151 * select the bit within the register. 3152 */ 3153 rxcfg |= RXMAC_MULTICAST; 3154 bzero(mchash, sizeof(mchash)); 3155 3156 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3157 if (ifma->ifma_addr->sa_family != AF_LINK) 3158 continue; 3159 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3160 ifma->ifma_addr), ETHER_ADDR_LEN); 3161 3162 /* Just want the 6 least significant bits. */ 3163 crc &= 0x3f; 3164 3165 /* Set the corresponding bit in the hash table. */ 3166 mchash[crc >> 5] |= 1 << (crc & 0x1f); 3167 } 3168 3169 CSR_WRITE_4(sc, JME_MAR0, mchash[0]); 3170 CSR_WRITE_4(sc, JME_MAR1, mchash[1]); 3171 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3172 } 3173 3174 static int 3175 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS) 3176 { 3177 struct jme_softc *sc = arg1; 3178 struct ifnet *ifp = &sc->arpcom.ac_if; 3179 int error, v; 3180 3181 ifnet_serialize_all(ifp); 3182 3183 v = sc->jme_tx_coal_to; 3184 error = sysctl_handle_int(oidp, &v, 0, req); 3185 if (error || req->newptr == NULL) 3186 goto back; 3187 3188 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) { 3189 error = EINVAL; 3190 goto back; 3191 } 3192 3193 if (v != sc->jme_tx_coal_to) { 3194 sc->jme_tx_coal_to = v; 3195 if (ifp->if_flags & IFF_RUNNING) 3196 jme_set_tx_coal(sc); 3197 } 3198 back: 3199 ifnet_deserialize_all(ifp); 3200 return error; 3201 } 3202 3203 static int 3204 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS) 3205 { 3206 struct jme_softc *sc = arg1; 3207 struct ifnet *ifp = &sc->arpcom.ac_if; 3208 int error, v; 3209 3210 ifnet_serialize_all(ifp); 3211 3212 v = sc->jme_tx_coal_pkt; 3213 error = sysctl_handle_int(oidp, &v, 0, req); 3214 if (error || req->newptr == NULL) 3215 goto back; 3216 3217 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) { 3218 error = EINVAL; 3219 goto back; 3220 } 3221 3222 if (v != sc->jme_tx_coal_pkt) { 3223 sc->jme_tx_coal_pkt = v; 3224 if (ifp->if_flags & IFF_RUNNING) 3225 jme_set_tx_coal(sc); 3226 } 3227 back: 3228 ifnet_deserialize_all(ifp); 3229 return error; 3230 } 3231 3232 static int 3233 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS) 3234 { 3235 struct jme_softc *sc = arg1; 3236 struct ifnet *ifp = &sc->arpcom.ac_if; 3237 int error, v; 3238 3239 ifnet_serialize_all(ifp); 3240 3241 v = sc->jme_rx_coal_to; 3242 error = sysctl_handle_int(oidp, &v, 0, req); 3243 if (error || req->newptr == NULL) 3244 goto back; 3245 3246 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) { 3247 error = EINVAL; 3248 goto back; 3249 } 3250 3251 if (v != sc->jme_rx_coal_to) { 3252 sc->jme_rx_coal_to = v; 3253 if (ifp->if_flags & IFF_RUNNING) 3254 jme_set_rx_coal(sc); 3255 } 3256 back: 3257 ifnet_deserialize_all(ifp); 3258 return error; 3259 } 3260 3261 static int 3262 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS) 3263 { 3264 struct jme_softc *sc = arg1; 3265 struct ifnet *ifp = &sc->arpcom.ac_if; 3266 int error, v; 3267 3268 
	ifnet_serialize_all(ifp);

	v = sc->jme_rx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_pkt) {
		sc->jme_rx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	ifnet_deserialize_all(ifp);
	return error;
}

static void
jme_set_tx_coal(struct jme_softc *sc)
{
	uint32_t reg;

	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);
}

static void
jme_set_rx_coal(struct jme_softc *sc)
{
	uint32_t reg;
	int r;

	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
		CSR_WRITE_4(sc, JME_PCCRX(r), reg);
}

#ifdef IFPOLL_ENABLE

static void
jme_npoll_status(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if (status & INTR_RXQ_DESC_EMPTY) {
		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
		jme_rx_restart(sc, status);
	}
}

static void
jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct jme_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	jme_rxeof(rdata, cycle);
}

static void
jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct jme_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

	jme_txeof(tdata);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int i, off;

		info->ifpi_status.status_func = jme_npoll_status;
		info->ifpi_status.serializer = &sc->jme_serialize;

		off = sc->jme_npoll_txoff;
		KKASSERT(off <= ncpus2);
		info->ifpi_tx[off].poll_func = jme_npoll_tx;
		info->ifpi_tx[off].arg = &sc->jme_cdata.jme_tx_data;
		info->ifpi_tx[off].serializer =
		    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;

		off = sc->jme_npoll_rxoff;
		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
			struct jme_rxdata *rdata =
			    &sc->jme_cdata.jme_rx_data[i];
			int idx = i + off;

			info->ifpi_rx[idx].poll_func = jme_npoll_rx;
			info->ifpi_rx[idx].arg = rdata;
			info->ifpi_rx[idx].serializer =
			    &rdata->jme_rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING)
			jme_disable_intr(sc);
		ifq_set_cpuid(&ifp->if_snd, sc->jme_npoll_txoff);
	} else {
		if (ifp->if_flags & IFF_RUNNING)
			jme_enable_intr(sc);
		ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
	}
}

static int
jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->jme_npoll_rxoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->jme_npoll_rxoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

static int
jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->jme_npoll_txoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2) {
		error = EINVAL;
	} else {
		error = 0;
		sc->jme_npoll_txoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif	/* IFPOLL_ENABLE */

static int
jme_rxring_dma_alloc(struct jme_rxdata *rdata)
{
	bus_dmamem_t dmem;
	int error, asize;

	asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
	error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
	    JME_RX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
		return error;
	}
	rdata->jme_rx_ring_tag = dmem.dmem_tag;
	rdata->jme_rx_ring_map = dmem.dmem_map;
	rdata->jme_rx_ring = dmem.dmem_addr;
	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;

	return 0;
}

static int
jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
{
	if ((paddr & 0xffffffff) == 0) {
		/*
		 * Don't allow the lower 32 bits of an RX buffer's
		 * physical address to be 0; otherwise it would break
		 * the detection of pending RSS information delivery
		 * on the RX path.
		 */
		return 1;
	}
	return 0;
}

static int
jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
{
	bus_addr_t lowaddr;
	int i, error;

	lowaddr = BUS_SPACE_MAXADDR;
	if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
		/* jme_rxbuf_dma_filter will be called */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	}

	/* Create a tag for Rx buffers. */
	error = bus_dma_tag_create(
	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    jme_rxbuf_dma_filter, NULL,	/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
		return error;
	}

	/*
	 * Create DMA maps for Rx buffers.
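	 *
	 * One spare map is created in addition to the per-descriptor
	 * maps; jme_newbuf() loads a new mbuf into the spare map and
	 * then swaps it with the descriptor's map, so a failed load
	 * never leaves a descriptor without a mapped buffer.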
	 */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
	    &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n",
		    rdata->jme_rx_idx);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
		    &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(rdata->jme_sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);

			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}

static void
jme_rx_intr(struct jme_softc *sc, uint32_t status)
{
	int r;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		if (status & rdata->jme_rx_coal) {
			lwkt_serialize_enter(&rdata->jme_rx_serialize);
			jme_rxeof(rdata, -1);
			lwkt_serialize_exit(&rdata->jme_rx_serialize);
		}
	}
}

static void
jme_enable_rss(struct jme_softc *sc)
{
	uint32_t rssc, ind;
	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
	int i;

	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
	    sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
	    ("%s: invalid # of RX rings (%d)",
	     sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));

	rssc = RSSC_HASH_64_ENTRY;
	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
	rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
	CSR_WRITE_4(sc, JME_RSSC, rssc);

	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < RSSKEY_NREGS; ++i) {
		uint32_t keyreg;

		keyreg = RSSKEY_REGVAL(key, i);
		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);

		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
	}

	/*
	 * Create the redirect table in the following fashion:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 *
	 * Each 32-bit RSSTBL register holds four one-byte table
	 * entries, so with e.g. 4 RX rings every register ends up as
	 * 0x03020100, and with 2 RX rings as 0x01000100.
	 */
	ind = 0;
	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
		int q;

		q = i % sc->jme_cdata.jme_rx_ring_cnt;
		ind |= q << (i * 8);
	}
	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);

	for (i = 0; i < RSSTBL_NREGS; ++i)
		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
}

static void
jme_disable_rss(struct jme_softc *sc)
{
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}

static void
jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
}

static void
jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
}

static int
jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
}

#ifdef INVARIANTS

static void
jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE,
	    slz, serialized);
}

#endif	/* INVARIANTS */

static void
jme_msix_try_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct jme_msix_data *msix;
	int error, i, r, msix_enable, msix_count;
	int offset, offset_def;

	msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
	KKASSERT(msix_count <= JME_NMSIX);

	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);

	/*
	 * We leave the 1st MSI-X vector unused, so we
	 * actually need msix_count + 1 MSI-X vectors.
	 */
	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
		return;

	for (i = 0; i < msix_count; ++i)
		sc->jme_msix[i].jme_msix_rid = -1;

	i = 0;

	/*
	 * Set up the status MSI-X.
	 */

	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = 0;
	msix->jme_msix_arg = sc;
	msix->jme_msix_func = jme_msix_status;
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		msix->jme_msix_intrs |=
		    sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
	}
	msix->jme_msix_serialize = &sc->jme_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
	    device_get_nameunit(dev));

	/*
	 * Set up the TX MSI-X.
	 */

	offset_def = device_get_unit(dev) % ncpus2;
	offset = device_getenv_int(dev, "msix.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid msix.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}

	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = offset;
	sc->jme_tx_cpuid = msix->jme_msix_cpuid;
	msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data;
	msix->jme_msix_func = jme_msix_tx;
	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
	    device_get_nameunit(dev));

	/*
	 * Set up the RX MSI-X.
	 */

	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
		    device_get_unit(dev)) % ncpus2;

		offset = device_getenv_int(dev, "msix.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
			device_printf(dev, "invalid msix.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		msix = &sc->jme_msix[i++];
		msix->jme_msix_cpuid = r + offset;
		KKASSERT(msix->jme_msix_cpuid < ncpus2);
		msix->jme_msix_arg = rdata;
		msix->jme_msix_func = jme_msix_rx;
		msix->jme_msix_intrs = rdata->jme_rx_coal;
		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
		    "%s rx%d",
		    device_get_nameunit(dev), r);
	}

	KKASSERT(i == msix_count);

	error = pci_setup_msix(dev);
	if (error)
		return;

	/* Set up jme_msix_cnt early, so we can clean up. */
	sc->jme_msix_cnt = msix_count;

	for (i = 0; i < msix_count; ++i) {
		msix = &sc->jme_msix[i];

		msix->jme_msix_vector = i + 1;
		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
		if (error)
			goto back;

		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &msix->jme_msix_rid, RF_ACTIVE);
		if (msix->jme_msix_res == NULL) {
			error = ENOMEM;
			goto back;
		}
	}

	/*
	 * Record each enabled interrupt source's 4-bit MSI-X vector
	 * number in the appropriate JME_MSINUM register field.
	 */
	for (i = 0; i < JME_INTR_CNT; ++i) {
		uint32_t intr_mask = (1 << i);
		int x;

		if ((JME_INTRS & intr_mask) == 0)
			continue;

		for (x = 0; x < msix_count; ++x) {
			msix = &sc->jme_msix[x];
			if (msix->jme_msix_intrs & intr_mask) {
				int reg, shift;

				reg = i / JME_MSINUM_FACTOR;
				KKASSERT(reg < JME_MSINUM_CNT);

				shift = (i % JME_MSINUM_FACTOR) * 4;

				sc->jme_msinum[reg] |=
				    (msix->jme_msix_vector << shift);

				break;
			}
		}
	}

	if (bootverbose) {
		for (i = 0; i < JME_MSINUM_CNT; ++i) {
			device_printf(dev, "MSINUM%d: %#x\n", i,
			    sc->jme_msinum[i]);
		}
	}

	pci_enable_msix(dev);
	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;

back:
	if (error)
		jme_msix_free(dev);
}

static int
jme_intr_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	u_int irq_flags;

	jme_msix_try_alloc(dev);

	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
		sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
		    &sc->jme_irq_rid, &irq_flags);

		sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->jme_irq_rid, irq_flags);
		if (sc->jme_irq_res == NULL) {
			device_printf(dev, "can't allocate irq\n");
			return ENXIO;
		}
	}
	return 0;
}

static void
jme_msix_free(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	KKASSERT(sc->jme_msix_cnt > 1);

	for (i = 0; i < sc->jme_msix_cnt; ++i) {
		struct jme_msix_data *msix = &sc->jme_msix[i];

		if (msix->jme_msix_res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    msix->jme_msix_rid, msix->jme_msix_res);
			msix->jme_msix_res = NULL;
		}
		if (msix->jme_msix_rid >= 0) {
			pci_release_msix_vector(dev, msix->jme_msix_rid);
			msix->jme_msix_rid = -1;
		}
	}
	pci_teardown_msix(dev);
}

static void
jme_intr_free(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
		if (sc->jme_irq_res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
			    sc->jme_irq_res);
		}
		if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(dev);
	} else {
		jme_msix_free(dev);
	}
}

static void
jme_msix_tx(void *xtdata)
{
	struct jme_txdata *tdata = xtdata;
	struct jme_softc *sc = tdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);

	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
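
	/*
	 * The status is acked before the ring is processed so that a
	 * completion which arrives while jme_txeof() runs sets the
	 * (currently masked) status bit again instead of being lost.
	 */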
	if (ifp->if_flags & IFF_RUNNING) {
		jme_txeof(tdata);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
}

static void
jme_msix_rx(void *xrdata)
{
	struct jme_rxdata *rdata = xrdata;
	struct jme_softc *sc = rdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);

	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    rdata->jme_rx_coal | rdata->jme_rx_comp);

	if (ifp->if_flags & IFF_RUNNING)
		jme_rxeof(rdata, -1);

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
}

static void
jme_msix_status(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);

	status = CSR_READ_4(sc, JME_INTR_STATUS);

	if (status & INTR_RXQ_DESC_EMPTY) {
		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
		if (ifp->if_flags & IFF_RUNNING)
			jme_rx_restart(sc, status);
	}

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
}

static void
jme_rx_restart(struct jme_softc *sc, uint32_t status)
{
	int i;

	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		if (status & rdata->jme_rx_empty) {
			lwkt_serialize_enter(&rdata->jme_rx_serialize);
			jme_rxeof(rdata, -1);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_emp++;
#endif
			lwkt_serialize_exit(&rdata->jme_rx_serialize);
		}
	}
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
	    RXCSR_RXQ_START);
}

static void
jme_set_msinum(struct jme_softc *sc)
{
	int i;

	for (i = 0; i < JME_MSINUM_CNT; ++i)
		CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
}

static int
jme_intr_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	int error;

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
		return jme_msix_setup(dev);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
	    jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		return error;
	}
	sc->jme_tx_cpuid = rman_get_cpuid(sc->jme_irq_res);

	return 0;
}

static void
jme_intr_teardown(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
		jme_msix_teardown(dev, sc->jme_msix_cnt);
	else
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
}

static int
jme_msix_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	int x;

	for (x = 0; x < sc->jme_msix_cnt; ++x) {
		struct jme_msix_data *msix = &sc->jme_msix[x];
		int error;

		error = bus_setup_intr_descr(dev, msix->jme_msix_res,
		    INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
		    &msix->jme_msix_handle, msix->jme_msix_serialize,
		    msix->jme_msix_desc);
		if (error) {
			device_printf(dev, "could not set up %s "
			    "interrupt handler.\n", msix->jme_msix_desc);
			jme_msix_teardown(dev, x);
			return error;
		}
	}
	return 0;
}

static void
jme_msix_teardown(device_t dev, int msix_count)
{
	struct jme_softc *sc = device_get_softc(dev);
	int x;

	for (x = 0; x < msix_count; ++x) {
		struct jme_msix_data *msix = &sc->jme_msix[x];

		bus_teardown_intr(dev, msix->jme_msix_res,
		    msix->jme_msix_handle);
	}
}

static void
jme_serialize_skipmain(struct jme_softc *sc)
{
	lwkt_serialize_array_enter(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, 1);
}

static void
jme_deserialize_skipmain(struct jme_softc *sc)
{
	lwkt_serialize_array_exit(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, 1);
}

static void
jme_enable_intr(struct jme_softc *sc)
{
	int i;

	for (i = 0; i < sc->jme_serialize_cnt; ++i)
		lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

static void
jme_disable_intr(struct jme_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	for (i = 0; i < sc->jme_serialize_cnt; ++i)
		lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);
}