/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),
	EMX_DEVICE(PCH_I218_LM2),
	EMX_DEVICE(PCH_I218_V2),
	EMX_DEVICE(PCH_I218_LM3),
	EMX_DEVICE(PCH_I218_V3),

	/* required last entry */
	EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

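/*
 * Convert the RSS type in the extended RX descriptor's MRQ field into
 * a pktinfo usable by the network stack and attach the Toeplitz hash
 * to the mbuf.  Only verifiable hash types are accepted; for a plain
 * IPv4 hash the TCP/UDP checksum status is consulted before trusting
 * the hash, and anything else makes this function return NULL so the
 * caller can fall back to software packet classification.
 */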
static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable, tx_ring_max;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	/*
	 * Setup RX rings
	 */
	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	/*
	 * Setup TX ring
	 */
	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);

	ifmedia_init(&sc->media, IFM_IMASK,
	    emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy; it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;

	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16 bits of ITR are reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
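	/*
	 * Worked example of the throttle math above (the 1000000000 / 256
	 * factor converts a rate in interrupts/sec into the ITR interval,
	 * which counts in 256ns units): asking for 10000 ints/s gives
	 * 1000000000 / 256 / 10000 = 390 interval units, so the exact
	 * frequency stored in int_throttle_ceil becomes
	 * 1000000000 / 256 / 390 = 10016 ints/s, i.e. the closest rate
	 * the hardware can actually realize.
	 */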
	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard Ethernet-sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/* Calculate # of RX rings */
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING);

	/*
	 * Calculate # of TX rings
	 *
	 * XXX
	 * I217/I218 claims to have 2 TX queues
	 *
	 * NOTE:
	 * Don't enable multiple TX queues on 82574; it always gives
	 * watchdog timeout on TX queue0 when multiple TCP streams are
	 * received.  It was originally suspected that the hardware TX
	 * checksum offloading caused this watchdog timeout, since only
	 * TCP ACKs are sent during TCP receiving tests.  However, even
	 * if the hardware TX checksum offloading is disabled, TX queue0
	 * still gives the watchdog timeout.
	 */
	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan)
		tx_ring_max = EMX_NTX_RING;
	sc->tx_ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, tx_ring_max);

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state; this is important for reading
	 * the NVM and MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the EEPROM settings; now apply the special cases
	 * where the EEPROM may be wrong or the board won't support
	 * wake on lan on a particular port.
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port sc, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	if (sc->tx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->tx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->tx_npoll_off = offset;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree; must be after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		error = emx_reset(sc);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(&sc->arpcom.ac_if);
			goto fail;
		}
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}
	return (0);
fail:
	emx_detach(dev);
	return (error);
}

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
		    sc->flash);
	}

	emx_dma_free(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

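/*
 * Per-subqueue transmit handler.  Frames are dequeued and encapsulated
 * until the ring runs low; the TX tail register (TDT) is only bumped
 * once tx_wreg_nsegs descriptors have accumulated (and once more at
 * the end), which batches the MMIO writes instead of paying for one
 * register access per frame.
 */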
static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Do we at least have a minimal number of free descriptors? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata);
			continue;
		}

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
}
static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_pch_lpt:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */

	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		tdata->tx_watchdog.wd_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest mac address; user can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571, RAR[0] may be overwritten when the other
	 * port is reset.  We make a duplicate in the last RAR entry
	 * for that eventuality; this assures the interface continues
	 * to function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_data[i].ifsq);
		ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog);
	}

	callout_reset(&sc->timer, hz, emx_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}
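	/*
	 * Decoding the magic number 0x800A0908 per the nibble scheme
	 * described above (bit 3 of each nibble = enable, bits 2:0 =
	 * MSI-X table entry): 0x8 routes RXQ0 to vector 0, 0x9 routes
	 * TXQ0 to vector 1, and 0xA routes Link/other to vector 2.
	 * The topmost 0x8 sets bit 31, which the comment above does
	 * not cover.
	 */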
	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (polling)
		emx_disable_intr(sc);
	else
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);
}

static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle.  The ICR register
	 * reports an all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(&sc->rx_data[i], -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			struct emx_txdata *tdata = &sc->tx_data[0];

			lwkt_serialize_enter(&tdata->tx_serialize);
			emx_txeof(tdata);
			if (!ifsq_is_empty(tdata->ifsq))
				ifsq_devstart(tdata->ifsq);
			lwkt_serialize_exit(&tdata->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	emx_init(sc);

	return (0);
}

static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = emx_tso_pullup(tdata, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index; this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = tdata->next_avail_tx_desc;
	tx_buffer = &tdata->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	tdata->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

	i = tdata->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &tdata->tx_buf[i];
		ctxd = &tdata->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == tdata->num_tx_desc)
			i = 0;
	}

	tdata->next_avail_tx_desc = i;

	KKASSERT(tdata->num_tx_desc_avail > nsegs);
	tdata->num_tx_desc_avail -= nsegs;

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
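	/*
	 * Rather than requesting a completion writeback on every packet,
	 * the RS bit below is only set once tx_intr_nsegs descriptors
	 * have been queued since the last request; the descriptor
	 * indices recorded in tx_dd[] are what emx_txeof() uses when
	 * reclaiming descriptors.  This trades slightly lazier reclaim
	 * for far fewer descriptor writebacks.
	 */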
	if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
		tdata->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_intr_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		tdata->tx_dd[tdata->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
		KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Defer TDT updating, until enough descriptors are setup
	 */
	*idx = i;

#ifdef EMX_TSS_DEBUG
	tdata->tx_pkts++;
#endif

	return (0);
}

static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	reg_rctl &= ~E1000_RCTL_UPE;
	reg_rctl &= ~E1000_RCTL_MPE;
	reg_rctl &= ~E1000_RCTL_SBP;
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(&sc->main_serialize);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) &
		    E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571EB/82572EI
		 */
		if (sc->link_speed != SPEED_1000 &&
		    (hw->mac.type == e1000_82571 ||
		     hw->mac.type == e1000_82572)) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			device_printf(dev, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    ((sc->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		}
		sc->link_active = 1;
		sc->smartspeed = 0;
		ifp->if_baudrate = sc->link_speed * 1000000;
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		sc->link_active = 0;
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
emx_stop(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_clr_oactive(tdata->ifsq);
		ifsq_watchdog_stop(&tdata->tx_watchdog);
		tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;
	}

	/*
	 * Disable multiple receive queues.
	 *
	 * NOTE:
	 * We should disable multiple receive queues before
	 * resetting the hardware.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		emx_free_tx_ring(&sc->tx_data[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		emx_free_rx_ring(&sc->rx_data[i]);
}

static int
emx_reset(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	uint16_t rx_buffer_size;
	uint32_t pba;

	/* Set up smart power down as default off on newer adapters. */
	if (!emx_smart_pwr_down &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (sc->hw.mac.type) {
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;

	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;

	case e1000_82574:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;

	case e1000_pch_lpt:
		pba = E1000_PBA_26K;
		break;

	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (sc->hw.mac.max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high
	 *   water mark.  This allows the receiver to restart by sending
	 *   XON when it has drained a bit.  Here we use an arbitrary
	 *   value of 1500 which will restart after one full frame is
	 *   pulled from the buffer.  There could be several smaller
	 *   frames in the buffer and if so they will not trigger the
	 *   XON until their total number reduces the buffer by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
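	/*
	 * Worked example (hypothetical numbers): with a PBA giving 32K
	 * of RX buffer, rx_buffer_size below becomes 32 * 1024 = 32768
	 * bytes.  For a standard 1518-byte maximum frame, roundup2()
	 * yields 2048, so high_water = 32768 - 2048 = 30720 and
	 * low_water = 30720 - 1500 = 29220.
	 */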
1954 */ 1955 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10; 1956 1957 sc->hw.fc.high_water = rx_buffer_size - 1958 roundup2(sc->hw.mac.max_frame_size, 1024); 1959 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500; 1960 1961 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME; 1962 sc->hw.fc.send_xon = TRUE; 1963 sc->hw.fc.requested_mode = e1000_fc_full; 1964 1965 /* 1966 * Device specific overrides/settings 1967 */ 1968 if (sc->hw.mac.type == e1000_pch_lpt) { 1969 sc->hw.fc.high_water = 0x5C20; 1970 sc->hw.fc.low_water = 0x5048; 1971 sc->hw.fc.pause_time = 0x0650; 1972 sc->hw.fc.refresh_time = 0x0400; 1973 /* Jumbos need adjusted PBA */ 1974 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) 1975 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12); 1976 else 1977 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26); 1978 } else if (sc->hw.mac.type == e1000_80003es2lan) { 1979 sc->hw.fc.pause_time = 0xFFFF; 1980 } 1981 1982 /* Issue a global reset */ 1983 e1000_reset_hw(&sc->hw); 1984 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1985 emx_disable_aspm(sc); 1986 1987 if (e1000_init_hw(&sc->hw) < 0) { 1988 device_printf(dev, "Hardware Initialization Failed\n"); 1989 return (EIO); 1990 } 1991 1992 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1993 e1000_get_phy_info(&sc->hw); 1994 e1000_check_for_link(&sc->hw); 1995 1996 return (0); 1997 } 1998 1999 static void 2000 emx_setup_ifp(struct emx_softc *sc) 2001 { 2002 struct ifnet *ifp = &sc->arpcom.ac_if; 2003 int i; 2004 2005 if_initname(ifp, device_get_name(sc->dev), 2006 device_get_unit(sc->dev)); 2007 ifp->if_softc = sc; 2008 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2009 ifp->if_init = emx_init; 2010 ifp->if_ioctl = emx_ioctl; 2011 ifp->if_start = emx_start; 2012 #ifdef IFPOLL_ENABLE 2013 ifp->if_npoll = emx_npoll; 2014 #endif 2015 ifp->if_serialize = emx_serialize; 2016 ifp->if_deserialize = emx_deserialize; 2017 ifp->if_tryserialize = emx_tryserialize; 2018 #ifdef INVARIANTS 2019 ifp->if_serialize_assert = emx_serialize_assert; 2020 #endif 2021 2022 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1); 2023 ifq_set_ready(&ifp->if_snd); 2024 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 2025 2026 ifp->if_mapsubq = ifq_mapsubq_mask; 2027 ifq_set_subq_mask(&ifp->if_snd, 0); 2028 2029 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2030 2031 ifp->if_capabilities = IFCAP_HWCSUM | 2032 IFCAP_VLAN_HWTAGGING | 2033 IFCAP_VLAN_MTU | 2034 IFCAP_TSO; 2035 if (sc->rx_ring_cnt > 1) 2036 ifp->if_capabilities |= IFCAP_RSS; 2037 ifp->if_capenable = ifp->if_capabilities; 2038 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2039 2040 /* 2041 * Tell the upper layer(s) we support long frames. 
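 *
 * sizeof(struct ether_vlan_header) is 18 bytes: the 14-byte Ethernet
 * header plus the 4-byte 802.1Q tag. Reporting that as ifi_hdrlen
 * lets the stack size packets correctly for VLAN-tagged frames.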
2042 */ 2043 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2044 2045 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2046 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2047 struct emx_txdata *tdata = &sc->tx_data[i]; 2048 2049 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2050 ifsq_set_priv(ifsq, tdata); 2051 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2052 tdata->ifsq = ifsq; 2053 2054 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2055 } 2056 2057 /* 2058 * Specify the media types supported by this sc and register 2059 * callbacks to update media and link information 2060 */ 2061 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2062 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2063 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2064 0, NULL); 2065 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 2066 } else { 2067 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2068 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2069 0, NULL); 2070 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2071 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2072 0, NULL); 2073 if (sc->hw.phy.type != e1000_phy_ife) { 2074 ifmedia_add(&sc->media, 2075 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2076 ifmedia_add(&sc->media, 2077 IFM_ETHER | IFM_1000_T, 0, NULL); 2078 } 2079 } 2080 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2081 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 2082 } 2083 2084 /* 2085 * Workaround for SmartSpeed on 82541 and 82547 controllers 2086 */ 2087 static void 2088 emx_smartspeed(struct emx_softc *sc) 2089 { 2090 uint16_t phy_tmp; 2091 2092 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2093 sc->hw.mac.autoneg == 0 || 2094 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2095 return; 2096 2097 if (sc->smartspeed == 0) { 2098 /* 2099 * If Master/Slave config fault is asserted twice, 2100 * we assume back-to-back 2101 */ 2102 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2103 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2104 return; 2105 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2106 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2107 e1000_read_phy_reg(&sc->hw, 2108 PHY_1000T_CTRL, &phy_tmp); 2109 if (phy_tmp & CR_1000T_MS_ENABLE) { 2110 phy_tmp &= ~CR_1000T_MS_ENABLE; 2111 e1000_write_phy_reg(&sc->hw, 2112 PHY_1000T_CTRL, phy_tmp); 2113 sc->smartspeed++; 2114 if (sc->hw.mac.autoneg && 2115 !e1000_phy_setup_autoneg(&sc->hw) && 2116 !e1000_read_phy_reg(&sc->hw, 2117 PHY_CONTROL, &phy_tmp)) { 2118 phy_tmp |= MII_CR_AUTO_NEG_EN | 2119 MII_CR_RESTART_AUTO_NEG; 2120 e1000_write_phy_reg(&sc->hw, 2121 PHY_CONTROL, phy_tmp); 2122 } 2123 } 2124 } 2125 return; 2126 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2127 /* If still no link, perhaps using 2/3 pair cable */ 2128 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2129 phy_tmp |= CR_1000T_MS_ENABLE; 2130 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2131 if (sc->hw.mac.autoneg && 2132 !e1000_phy_setup_autoneg(&sc->hw) && 2133 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2134 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2135 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2136 } 2137 } 2138 2139 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2140 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2141 sc->smartspeed = 0; 2142 } 2143 2144 static int 2145 emx_create_tx_ring(struct emx_txdata *tdata) 2146 { 2147 device_t 
dev = tdata->sc->dev; 2148 struct emx_txbuf *tx_buffer; 2149 int error, i, tsize, ntxd; 2150 2151 /* 2152 * Validate number of transmit descriptors. It must not exceed 2153 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2154 */ 2155 ntxd = device_getenv_int(dev, "txd", emx_txd); 2156 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2157 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2158 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2159 EMX_DEFAULT_TXD, ntxd); 2160 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2161 } else { 2162 tdata->num_tx_desc = ntxd; 2163 } 2164 2165 /* 2166 * Allocate Transmit Descriptor ring 2167 */ 2168 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2169 EMX_DBA_ALIGN); 2170 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2171 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2172 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2173 &tdata->tx_desc_paddr); 2174 if (tdata->tx_desc_base == NULL) { 2175 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2176 return ENOMEM; 2177 } 2178 2179 tsize = __VM_CACHELINE_ALIGN( 2180 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2181 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO); 2182 2183 /* 2184 * Create DMA tags for tx buffers 2185 */ 2186 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2187 1, 0, /* alignment, bounds */ 2188 BUS_SPACE_MAXADDR, /* lowaddr */ 2189 BUS_SPACE_MAXADDR, /* highaddr */ 2190 NULL, NULL, /* filter, filterarg */ 2191 EMX_TSO_SIZE, /* maxsize */ 2192 EMX_MAX_SCATTER, /* nsegments */ 2193 EMX_MAX_SEGSIZE, /* maxsegsize */ 2194 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2195 BUS_DMA_ONEBPAGE, /* flags */ 2196 &tdata->txtag); 2197 if (error) { 2198 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2199 kfree(tdata->tx_buf, M_DEVBUF); 2200 tdata->tx_buf = NULL; 2201 return error; 2202 } 2203 2204 /* 2205 * Create DMA maps for tx buffers 2206 */ 2207 for (i = 0; i < tdata->num_tx_desc; i++) { 2208 tx_buffer = &tdata->tx_buf[i]; 2209 2210 error = bus_dmamap_create(tdata->txtag, 2211 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2212 &tx_buffer->map); 2213 if (error) { 2214 device_printf(dev, "Unable to create TX DMA map\n"); 2215 emx_destroy_tx_ring(tdata, i); 2216 return error; 2217 } 2218 } 2219 2220 /* 2221 * Setup TX parameters 2222 */ 2223 tdata->spare_tx_desc = EMX_TX_SPARE; 2224 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2225 2226 /* 2227 * Keep following relationship between spare_tx_desc, oact_tx_desc 2228 * and tx_intr_nsegs: 2229 * (spare_tx_desc + EMX_TX_RESERVED) <= 2230 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2231 */ 2232 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2233 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2234 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2235 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2236 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2237 2238 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2239 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2240 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2241 2242 /* 2243 * Pullup extra 4bytes into the first data segment for TSO, see: 2244 * 82571/82572 specification update errata #7 2245 * 2246 * Same applies to I217 (and maybe I218). 2247 * 2248 * NOTE: 2249 * 4bytes instead of 2bytes, which are mentioned in the errata, 2250 * are pulled; mainly to keep rest of the data properly aligned. 
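 *
 * (Alignment note, inferred rather than stated by the errata: the
 * IP header in an mbuf normally starts on a 4-byte boundary, so
 * pulling 4 bytes instead of the documented 2 keeps the remainder
 * of the first data segment on its original alignment.)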
2251 */ 2252 if (tdata->sc->hw.mac.type == e1000_82571 || 2253 tdata->sc->hw.mac.type == e1000_82572 || 2254 tdata->sc->hw.mac.type == e1000_pch_lpt) 2255 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2256 2257 return (0); 2258 } 2259 2260 static void 2261 emx_init_tx_ring(struct emx_txdata *tdata) 2262 { 2263 /* Clear the old ring contents */ 2264 bzero(tdata->tx_desc_base, 2265 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2266 2267 /* Reset state */ 2268 tdata->next_avail_tx_desc = 0; 2269 tdata->next_tx_to_clean = 0; 2270 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2271 2272 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2273 if (tdata->sc->tx_ring_inuse > 1) { 2274 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2275 if (bootverbose) { 2276 if_printf(&tdata->sc->arpcom.ac_if, 2277 "TX %d force ctx setup\n", tdata->idx); 2278 } 2279 } 2280 } 2281 2282 static void 2283 emx_init_tx_unit(struct emx_softc *sc) 2284 { 2285 uint32_t tctl, tarc, tipg = 0; 2286 int i; 2287 2288 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2289 struct emx_txdata *tdata = &sc->tx_data[i]; 2290 uint64_t bus_addr; 2291 2292 /* Setup the Base and Length of the Tx Descriptor Ring */ 2293 bus_addr = tdata->tx_desc_paddr; 2294 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2295 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2296 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2297 (uint32_t)(bus_addr >> 32)); 2298 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2299 (uint32_t)bus_addr); 2300 /* Setup the HW Tx Head and Tail descriptor pointers */ 2301 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2302 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2303 } 2304 2305 /* Set the default values for the Tx Inter Packet Gap timer */ 2306 switch (sc->hw.mac.type) { 2307 case e1000_80003es2lan: 2308 tipg = DEFAULT_82543_TIPG_IPGR1; 2309 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2310 E1000_TIPG_IPGR2_SHIFT; 2311 break; 2312 2313 default: 2314 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2315 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2316 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2317 else 2318 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2319 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2320 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2321 break; 2322 } 2323 2324 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2325 2326 /* NOTE: 0 is not allowed for TIDV */ 2327 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2328 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2329 2330 if (sc->hw.mac.type == e1000_82571 || 2331 sc->hw.mac.type == e1000_82572) { 2332 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2333 tarc |= EMX_TARC_SPEED_MODE; 2334 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2335 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2336 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2337 tarc |= 1; 2338 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2339 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2340 tarc |= 1; 2341 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2342 } 2343 2344 /* Program the Transmit Control Register */ 2345 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2346 tctl &= ~E1000_TCTL_CT; 2347 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2348 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2349 tctl |= E1000_TCTL_MULR; 2350 2351 /* This write will effectively turn on the transmit unit. 
*/
2352 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl);
2353
2354 if (sc->hw.mac.type == e1000_82571 ||
2355 sc->hw.mac.type == e1000_82572 ||
2356 sc->hw.mac.type == e1000_80003es2lan) {
2357 /* Bit 28 of TARC1 must be cleared when MULR is enabled */
2358 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2359 tarc &= ~(1 << 28);
2360 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2361 }
2362
2363 if (sc->tx_ring_inuse > 1) {
2364 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2365 tarc &= ~EMX_TARC_COUNT_MASK;
2366 tarc |= 1;
2367 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2368
2369 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2370 tarc &= ~EMX_TARC_COUNT_MASK;
2371 tarc |= 1;
2372 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2373 }
2374 }
2375
2376 static void
2377 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc)
2378 {
2379 struct emx_txbuf *tx_buffer;
2380 int i;
2381
2382 /* Free Transmit Descriptor ring */
2383 if (tdata->tx_desc_base) {
2384 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap);
2385 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base,
2386 tdata->tx_desc_dmap);
2387 bus_dma_tag_destroy(tdata->tx_desc_dtag);
2388
2389 tdata->tx_desc_base = NULL;
2390 }
2391
2392 if (tdata->tx_buf == NULL)
2393 return;
2394
2395 for (i = 0; i < ndesc; i++) {
2396 tx_buffer = &tdata->tx_buf[i];
2397
2398 KKASSERT(tx_buffer->m_head == NULL);
2399 bus_dmamap_destroy(tdata->txtag, tx_buffer->map);
2400 }
2401 bus_dma_tag_destroy(tdata->txtag);
2402
2403 kfree(tdata->tx_buf, M_DEVBUF);
2404 tdata->tx_buf = NULL;
2405 }
2406
2407 /*
2408 * The offload context needs to be set when we transfer the first
2409 * packet of a particular protocol (TCP/UDP). This routine has been
2410 * enhanced to deal with inserted VLAN headers.
2411 *
2412 * If the new packet's ether header length, ip header length and
2413 * csum offloading type are the same as the previous packet's, we
2414 * should avoid allocating a new csum context descriptor; mainly to
2415 * take advantage of the pipeline effect of the TX data read request.
2416 *
2417 * This function returns the number of TX descriptors allocated for
2418 * the csum context.
2419 */
2420 static int
2421 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
2422 uint32_t *txd_upper, uint32_t *txd_lower)
2423 {
2424 struct e1000_context_desc *TXD;
2425 int curr_txd, ehdrlen, csum_flags;
2426 uint32_t cmd, hdr_len, ip_hlen;
2427
2428 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
2429 ip_hlen = mp->m_pkthdr.csum_iphlen;
2430 ehdrlen = mp->m_pkthdr.csum_lhlen;
2431
2432 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
2433 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
2434 tdata->csum_flags == csum_flags) {
2435 /*
2436 * Same csum offload context as the previous packets;
2437 * just return.
2438 */
2439 *txd_upper = tdata->csum_txd_upper;
2440 *txd_lower = tdata->csum_txd_lower;
2441 return 0;
2442 }
2443
2444 /*
2445 * Setup a new csum offload context.
2446 */
2447
2448 curr_txd = tdata->next_avail_tx_desc;
2449 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
2450
2451 cmd = 0;
2452
2453 /* Setup of IP header checksum. */
2454 if (csum_flags & CSUM_IP) {
2455 /*
2456 * Start offset for header checksum calculation.
2457 * End offset for header checksum calculation.
2458 * Offset of place to put the checksum.
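 *
 * For example, with an untagged IPv4 packet ehdrlen is 14 and
 * ip_hlen is typically 20, giving ipcss = 14 (first byte of the
 * IP header), ipcse = 33 (last byte of the IP header) and
 * ipcso = 14 + 10 = 24, since offsetof(struct ip, ip_sum) == 10.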
2459 */ 2460 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2461 TXD->lower_setup.ip_fields.ipcse = 2462 htole16(ehdrlen + ip_hlen - 1); 2463 TXD->lower_setup.ip_fields.ipcso = 2464 ehdrlen + offsetof(struct ip, ip_sum); 2465 cmd |= E1000_TXD_CMD_IP; 2466 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2467 } 2468 hdr_len = ehdrlen + ip_hlen; 2469 2470 if (csum_flags & CSUM_TCP) { 2471 /* 2472 * Start offset for payload checksum calculation. 2473 * End offset for payload checksum calculation. 2474 * Offset of place to put the checksum. 2475 */ 2476 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2477 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2478 TXD->upper_setup.tcp_fields.tucso = 2479 hdr_len + offsetof(struct tcphdr, th_sum); 2480 cmd |= E1000_TXD_CMD_TCP; 2481 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2482 } else if (csum_flags & CSUM_UDP) { 2483 /* 2484 * Start offset for header checksum calculation. 2485 * End offset for header checksum calculation. 2486 * Offset of place to put the checksum. 2487 */ 2488 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2489 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2490 TXD->upper_setup.tcp_fields.tucso = 2491 hdr_len + offsetof(struct udphdr, uh_sum); 2492 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2493 } 2494 2495 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2496 E1000_TXD_DTYP_D; /* Data descr */ 2497 2498 /* Save the information for this csum offloading context */ 2499 tdata->csum_lhlen = ehdrlen; 2500 tdata->csum_iphlen = ip_hlen; 2501 tdata->csum_flags = csum_flags; 2502 tdata->csum_txd_upper = *txd_upper; 2503 tdata->csum_txd_lower = *txd_lower; 2504 2505 TXD->tcp_seg_setup.data = htole32(0); 2506 TXD->cmd_and_length = 2507 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2508 2509 if (++curr_txd == tdata->num_tx_desc) 2510 curr_txd = 0; 2511 2512 KKASSERT(tdata->num_tx_desc_avail > 0); 2513 tdata->num_tx_desc_avail--; 2514 2515 tdata->next_avail_tx_desc = curr_txd; 2516 return 1; 2517 } 2518 2519 static void 2520 emx_txeof(struct emx_txdata *tdata) 2521 { 2522 struct ifnet *ifp = &tdata->sc->arpcom.ac_if; 2523 struct emx_txbuf *tx_buffer; 2524 int first, num_avail; 2525 2526 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2527 return; 2528 2529 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2530 return; 2531 2532 num_avail = tdata->num_tx_desc_avail; 2533 first = tdata->next_tx_to_clean; 2534 2535 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2536 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2537 struct e1000_tx_desc *tx_desc; 2538 2539 tx_desc = &tdata->tx_desc_base[dd_idx]; 2540 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2541 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2542 2543 if (++dd_idx == tdata->num_tx_desc) 2544 dd_idx = 0; 2545 2546 while (first != dd_idx) { 2547 logif(pkt_txclean); 2548 2549 num_avail++; 2550 2551 tx_buffer = &tdata->tx_buf[first]; 2552 if (tx_buffer->m_head) { 2553 IFNET_STAT_INC(ifp, opackets, 1); 2554 bus_dmamap_unload(tdata->txtag, 2555 tx_buffer->map); 2556 m_freem(tx_buffer->m_head); 2557 tx_buffer->m_head = NULL; 2558 } 2559 2560 if (++first == tdata->num_tx_desc) 2561 first = 0; 2562 } 2563 } else { 2564 break; 2565 } 2566 } 2567 tdata->next_tx_to_clean = first; 2568 tdata->num_tx_desc_avail = num_avail; 2569 2570 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2571 tdata->tx_dd_head = 0; 2572 tdata->tx_dd_tail = 0; 2573 } 2574 2575 if (!EMX_IS_OACTIVE(tdata)) { 2576 ifsq_clr_oactive(tdata->ifsq); 2577 2578 /* All clean, turn off the timer */ 2579 if (tdata->num_tx_desc_avail == 
tdata->num_tx_desc)
2580 tdata->tx_watchdog.wd_timer = 0;
2581 }
2582 }
2583
2584 static void
2585 emx_tx_collect(struct emx_txdata *tdata)
2586 {
2587 struct ifnet *ifp = &tdata->sc->arpcom.ac_if;
2588 struct emx_txbuf *tx_buffer;
2589 int tdh, first, num_avail, dd_idx = -1;
2590
2591 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2592 return;
2593
2594 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx));
2595 if (tdh == tdata->next_tx_to_clean)
2596 return;
2597
2598 if (tdata->tx_dd_head != tdata->tx_dd_tail)
2599 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2600
2601 num_avail = tdata->num_tx_desc_avail;
2602 first = tdata->next_tx_to_clean;
2603
2604 while (first != tdh) {
2605 logif(pkt_txclean);
2606
2607 num_avail++;
2608
2609 tx_buffer = &tdata->tx_buf[first];
2610 if (tx_buffer->m_head) {
2611 IFNET_STAT_INC(ifp, opackets, 1);
2612 bus_dmamap_unload(tdata->txtag,
2613 tx_buffer->map);
2614 m_freem(tx_buffer->m_head);
2615 tx_buffer->m_head = NULL;
2616 }
2617
2618 if (first == dd_idx) {
2619 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2620 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2621 tdata->tx_dd_head = 0;
2622 tdata->tx_dd_tail = 0;
2623 dd_idx = -1;
2624 } else {
2625 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2626 }
2627 }
2628
2629 if (++first == tdata->num_tx_desc)
2630 first = 0;
2631 }
2632 tdata->next_tx_to_clean = first;
2633 tdata->num_tx_desc_avail = num_avail;
2634
2635 if (!EMX_IS_OACTIVE(tdata)) {
2636 ifsq_clr_oactive(tdata->ifsq);
2637
2638 /* All clean, turn off the timer */
2639 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2640 tdata->tx_watchdog.wd_timer = 0;
2641 }
2642 }
2643
2644 /*
2645 * When the link is lost there is sometimes work still in the TX
2646 * ring, which would result in a watchdog; rather than allowing
2647 * that, do an attempted cleanup here and then reinit. Note that
2648 * this has been seen mostly with fiber adapters.
2649 */
2650 static void
2651 emx_tx_purge(struct emx_softc *sc)
2652 {
2653 int i;
2654
2655 if (sc->link_active)
2656 return;
2657
2658 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2659 struct emx_txdata *tdata = &sc->tx_data[i];
2660
2661 if (tdata->tx_watchdog.wd_timer) {
2662 emx_tx_collect(tdata);
2663 if (tdata->tx_watchdog.wd_timer) {
2664 if_printf(&sc->arpcom.ac_if,
2665 "Link lost, TX pending, reinit\n");
2666 emx_init(sc);
2667 return;
2668 }
2669 }
2670 }
2671 }
2672
2673 static int
2674 emx_newbuf(struct emx_rxdata *rdata, int i, int init)
2675 {
2676 struct mbuf *m;
2677 bus_dma_segment_t seg;
2678 bus_dmamap_t map;
2679 struct emx_rxbuf *rx_buffer;
2680 int error, nseg;
2681
2682 m = m_getcl(init ?
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2683 if (m == NULL) { 2684 if (init) { 2685 if_printf(&rdata->sc->arpcom.ac_if, 2686 "Unable to allocate RX mbuf\n"); 2687 } 2688 return (ENOBUFS); 2689 } 2690 m->m_len = m->m_pkthdr.len = MCLBYTES; 2691 2692 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 2693 m_adj(m, ETHER_ALIGN); 2694 2695 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2696 rdata->rx_sparemap, m, 2697 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2698 if (error) { 2699 m_freem(m); 2700 if (init) { 2701 if_printf(&rdata->sc->arpcom.ac_if, 2702 "Unable to load RX mbuf\n"); 2703 } 2704 return (error); 2705 } 2706 2707 rx_buffer = &rdata->rx_buf[i]; 2708 if (rx_buffer->m_head != NULL) 2709 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2710 2711 map = rx_buffer->map; 2712 rx_buffer->map = rdata->rx_sparemap; 2713 rdata->rx_sparemap = map; 2714 2715 rx_buffer->m_head = m; 2716 rx_buffer->paddr = seg.ds_addr; 2717 2718 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2719 return (0); 2720 } 2721 2722 static int 2723 emx_create_rx_ring(struct emx_rxdata *rdata) 2724 { 2725 device_t dev = rdata->sc->dev; 2726 struct emx_rxbuf *rx_buffer; 2727 int i, error, rsize, nrxd; 2728 2729 /* 2730 * Validate number of receive descriptors. It must not exceed 2731 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2732 */ 2733 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2734 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2735 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2736 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2737 EMX_DEFAULT_RXD, nrxd); 2738 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2739 } else { 2740 rdata->num_rx_desc = nrxd; 2741 } 2742 2743 /* 2744 * Allocate Receive Descriptor ring 2745 */ 2746 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2747 EMX_DBA_ALIGN); 2748 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2749 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2750 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2751 &rdata->rx_desc_paddr); 2752 if (rdata->rx_desc == NULL) { 2753 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2754 return ENOMEM; 2755 } 2756 2757 rsize = __VM_CACHELINE_ALIGN( 2758 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2759 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2760 2761 /* 2762 * Create DMA tag for rx buffers 2763 */ 2764 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2765 1, 0, /* alignment, bounds */ 2766 BUS_SPACE_MAXADDR, /* lowaddr */ 2767 BUS_SPACE_MAXADDR, /* highaddr */ 2768 NULL, NULL, /* filter, filterarg */ 2769 MCLBYTES, /* maxsize */ 2770 1, /* nsegments */ 2771 MCLBYTES, /* maxsegsize */ 2772 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2773 &rdata->rxtag); 2774 if (error) { 2775 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2776 kfree(rdata->rx_buf, M_DEVBUF); 2777 rdata->rx_buf = NULL; 2778 return error; 2779 } 2780 2781 /* 2782 * Create spare DMA map for rx buffers 2783 */ 2784 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2785 &rdata->rx_sparemap); 2786 if (error) { 2787 device_printf(dev, "Unable to create spare RX DMA map\n"); 2788 bus_dma_tag_destroy(rdata->rxtag); 2789 kfree(rdata->rx_buf, M_DEVBUF); 2790 rdata->rx_buf = NULL; 2791 return error; 2792 } 2793 2794 /* 2795 * Create DMA maps for rx buffers 2796 */ 2797 for (i = 0; i < rdata->num_rx_desc; i++) { 2798 rx_buffer = &rdata->rx_buf[i]; 2799 2800 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2801 &rx_buffer->map); 2802 
if (error) {
2803 device_printf(dev, "Unable to create RX DMA map\n");
2804 emx_destroy_rx_ring(rdata, i);
2805 return error;
2806 }
2807 }
2808 return (0);
2809 }
2810
2811 static void
2812 emx_free_rx_ring(struct emx_rxdata *rdata)
2813 {
2814 int i;
2815
2816 for (i = 0; i < rdata->num_rx_desc; i++) {
2817 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i];
2818
2819 if (rx_buffer->m_head != NULL) {
2820 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2821 m_freem(rx_buffer->m_head);
2822 rx_buffer->m_head = NULL;
2823 }
2824 }
2825
2826 if (rdata->fmp != NULL)
2827 m_freem(rdata->fmp);
2828 rdata->fmp = NULL;
2829 rdata->lmp = NULL;
2830 }
2831
2832 static void
2833 emx_free_tx_ring(struct emx_txdata *tdata)
2834 {
2835 int i;
2836
2837 for (i = 0; i < tdata->num_tx_desc; i++) {
2838 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i];
2839
2840 if (tx_buffer->m_head != NULL) {
2841 bus_dmamap_unload(tdata->txtag, tx_buffer->map);
2842 m_freem(tx_buffer->m_head);
2843 tx_buffer->m_head = NULL;
2844 }
2845 }
2846
2847 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX;
2848
2849 tdata->csum_flags = 0;
2850 tdata->csum_lhlen = 0;
2851 tdata->csum_iphlen = 0;
2852 tdata->csum_thlen = 0;
2853 tdata->csum_mss = 0;
2854 tdata->csum_pktlen = 0;
2855
2856 tdata->tx_dd_head = 0;
2857 tdata->tx_dd_tail = 0;
2858 tdata->tx_nsegs = 0;
2859 }
2860
2861 static int
2862 emx_init_rx_ring(struct emx_rxdata *rdata)
2863 {
2864 int i, error;
2865
2866 /* Reset descriptor ring */
2867 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);
2868
2869 /* Allocate new ones. */
2870 for (i = 0; i < rdata->num_rx_desc; i++) {
2871 error = emx_newbuf(rdata, i, 1);
2872 if (error)
2873 return (error);
2874 }
2875
2876 /* Setup our descriptor pointers */
2877 rdata->next_rx_desc_to_check = 0;
2878
2879 return (0);
2880 }
2881
2882 static void
2883 emx_init_rx_unit(struct emx_softc *sc)
2884 {
2885 struct ifnet *ifp = &sc->arpcom.ac_if;
2886 uint64_t bus_addr;
2887 uint32_t rctl, itr, rfctl;
2888 int i;
2889
2890 /*
2891 * Make sure receives are disabled while setting
2892 * up the descriptor ring
2893 */
2894 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
2895 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2896
2897 /*
2898 * Set the interrupt throttling rate. Value is calculated
2899 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
2900 */
2901 if (sc->int_throttle_ceil)
2902 itr = 1000000000 / 256 / sc->int_throttle_ceil;
2903 else
2904 itr = 0;
2905 emx_set_itr(sc, itr);
2906
2907 /* Use extended RX descriptor */
2908 rfctl = E1000_RFCTL_EXTEN;
2909
2910 /* Disable accelerated acknowledge */
2911 if (sc->hw.mac.type == e1000_82574)
2912 rfctl |= E1000_RFCTL_ACK_DIS;
2913
2914 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);
2915
2916 /*
2917 * Receive Checksum Offload for TCP and UDP
2918 *
2919 * Checksum offloading is also enabled if multiple receive
2920 * queues are to be supported, since we need it to figure out
2921 * the packet type.
2922 */
2923 if ((ifp->if_capenable & IFCAP_RXCSUM) ||
2924 sc->rx_ring_cnt > 1) {
2925 uint32_t rxcsum;
2926
2927 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
2928
2929 /*
2930 * NOTE:
2931 * PCSD must be enabled to enable multiple
2932 * receive queues.
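 *
 * (Our reading of the datasheet, not part of the original note:
 * with PCSD set the hardware stops writing the raw packet
 * checksum into the RX descriptor, freeing that field to carry
 * the RSS hash instead.)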
2933 */
2934 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2935 E1000_RXCSUM_PCSD;
2936 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
2937 }
2938
2939 /*
2940 * Configure multiple receive queues (RSS)
2941 */
2942 if (sc->rx_ring_cnt > 1) {
2943 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
2944 uint32_t reta;
2945
2946 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
2947 ("invalid number of RX rings (%d)", sc->rx_ring_cnt));
2948
2949 /*
2950 * NOTE:
2951 * When we reach here, RSS has already been disabled
2952 * in emx_stop(), so we can safely configure the RSS
2953 * key and redirect table.
2954 */
2955
2956 /*
2957 * Configure RSS key
2958 */
2959 toeplitz_get_key(key, sizeof(key));
2960 for (i = 0; i < EMX_NRSSRK; ++i) {
2961 uint32_t rssrk;
2962
2963 rssrk = EMX_RSSRK_VAL(key, i);
2964 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
2965
2966 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
2967 }
2968
2969 /*
2970 * Configure the RSS redirect table in the following fashion:
2971 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
2972 */
2973 reta = 0;
2974 for (i = 0; i < EMX_RETA_SIZE; ++i) {
2975 uint32_t q;
2976
2977 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT;
2978 reta |= q << (8 * i);
2979 }
2980 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
2981
2982 for (i = 0; i < EMX_NRETA; ++i)
2983 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta);
2984
2985 /*
2986 * Enable multiple receive queues.
2987 * Enable IPv4 RSS standard hash functions.
2988 * Disable RSS interrupt.
2989 */
2990 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
2991 E1000_MRQC_ENABLE_RSS_2Q |
2992 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2993 E1000_MRQC_RSS_FIELD_IPV4);
2994 }
2995
2996 /*
2997 * XXX TEMPORARY WORKAROUND: on some systems with 82573
2998 * long latencies are observed, like Lenovo X60. This
2999 * change eliminates the problem, but since having positive
3000 * values in RDTR is a known source of problems on other
3001 * platforms, another solution is being sought.
3002 */
3003 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
3004 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
3005 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
3006 }
3007
3008 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3009 struct emx_rxdata *rdata = &sc->rx_data[i];
3010
3011 /*
3012 * Setup the Base and Length of the Rx Descriptor Ring
3013 */
3014 bus_addr = rdata->rx_desc_paddr;
3015 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
3016 rdata->num_rx_desc * sizeof(emx_rxdesc_t));
3017 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
3018 (uint32_t)(bus_addr >> 32));
3019 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
3020 (uint32_t)bus_addr);
3021
3022 /*
3023 * Setup the HW Rx Head and Tail Descriptor Pointers
3024 */
3025 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
3026 E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
3027 sc->rx_data[i].num_rx_desc - 1);
3028 }
3029
3030 if (sc->hw.mac.type >= e1000_pch2lan) {
3031 if (ifp->if_mtu > ETHERMTU)
3032 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE);
3033 else
3034 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE);
3035 }
3036
3037 /* Setup the Receive Control Register */
3038 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3039 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3040 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
3041 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3042
3043 /* Make sure VLAN Filters are off */
3044 rctl &= ~E1000_RCTL_VFE;
3045
3046 /* Don't store bad packets */
3047 rctl &= ~E1000_RCTL_SBP;
3048
3049 /* MCLBYTES */
3050 rctl |= E1000_RCTL_SZ_2048;
3051
3052 if (ifp->if_mtu > ETHERMTU)
3053 rctl |= E1000_RCTL_LPE;
3054 else
3055 rctl &= ~E1000_RCTL_LPE;
3056
3057 /* Enable Receives */
3058 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
3059 }
3060
3061 static void
3062 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc)
3063 {
3064 struct emx_rxbuf *rx_buffer;
3065 int i;
3066
3067 /* Free Receive Descriptor ring */
3068 if (rdata->rx_desc) {
3069 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
3070 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
3071 rdata->rx_desc_dmap);
3072 bus_dma_tag_destroy(rdata->rx_desc_dtag);
3073
3074 rdata->rx_desc = NULL;
3075 }
3076
3077 if (rdata->rx_buf == NULL)
3078 return;
3079
3080 for (i = 0; i < ndesc; i++) {
3081 rx_buffer = &rdata->rx_buf[i];
3082
3083 KKASSERT(rx_buffer->m_head == NULL);
3084 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
3085 }
3086 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
3087 bus_dma_tag_destroy(rdata->rxtag);
3088
3089 kfree(rdata->rx_buf, M_DEVBUF);
3090 rdata->rx_buf = NULL;
3091 }
3092
3093 static void
3094 emx_rxeof(struct emx_rxdata *rdata, int count)
3095 {
3096 struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
3097 uint32_t staterr;
3098 emx_rxdesc_t *current_desc;
3099 struct mbuf *mp;
3100 int i, cpuid = mycpuid;
3101
3102 i = rdata->next_rx_desc_to_check;
3103 current_desc = &rdata->rx_desc[i];
3104 staterr = le32toh(current_desc->rxd_staterr);
3105
3106 if (!(staterr & E1000_RXD_STAT_DD))
3107 return;
3108
3109 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
3110 struct pktinfo *pi = NULL, pi0;
3111 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
3112 struct mbuf *m = NULL;
3113 int eop, len;
3114
3115 logif(pkt_receive);
3116
3117 mp = rx_buf->m_head;
3118
3119 /*
3120 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3121 * needs to access the last received byte in the mbuf.
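 *
 * (Clarification: the BUS_DMASYNC_POSTREAD below makes the
 * freshly DMA'd packet bytes visible to the CPU before anything
 * dereferences the mbuf; deferring it could let the TBI
 * workaround read stale data.)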
3122 */
3123 bus_dmamap_sync(rdata->rxtag, rx_buf->map,
3124 BUS_DMASYNC_POSTREAD);
3125
3126 len = le16toh(current_desc->rxd_length);
3127 if (staterr & E1000_RXD_STAT_EOP) {
3128 count--;
3129 eop = 1;
3130 } else {
3131 eop = 0;
3132 }
3133
3134 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3135 uint16_t vlan = 0;
3136 uint32_t mrq, rss_hash;
3137
3138 /*
3139 * Save the necessary information
3140 * before emx_newbuf() destroys it.
3141 */
3142 if ((staterr & E1000_RXD_STAT_VP) && eop)
3143 vlan = le16toh(current_desc->rxd_vlan);
3144
3145 mrq = le32toh(current_desc->rxd_mrq);
3146 rss_hash = le32toh(current_desc->rxd_rss);
3147
3148 EMX_RSS_DPRINTF(rdata->sc, 10,
3149 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
3150 rdata->idx, mrq, rss_hash);
3151
3152 if (emx_newbuf(rdata, i, 0) != 0) {
3153 IFNET_STAT_INC(ifp, iqdrops, 1);
3154 goto discard;
3155 }
3156
3157 /* Assign correct length to the current fragment */
3158 mp->m_len = len;
3159
3160 if (rdata->fmp == NULL) {
3161 mp->m_pkthdr.len = len;
3162 rdata->fmp = mp; /* Store the first mbuf */
3163 rdata->lmp = mp;
3164 } else {
3165 /*
3166 * Chain mbufs together
3167 */
3168 rdata->lmp->m_next = mp;
3169 rdata->lmp = rdata->lmp->m_next;
3170 rdata->fmp->m_pkthdr.len += len;
3171 }
3172
3173 if (eop) {
3174 rdata->fmp->m_pkthdr.rcvif = ifp;
3175 IFNET_STAT_INC(ifp, ipackets, 1);
3176
3177 if (ifp->if_capenable & IFCAP_RXCSUM)
3178 emx_rxcsum(staterr, rdata->fmp);
3179
3180 if (staterr & E1000_RXD_STAT_VP) {
3181 rdata->fmp->m_pkthdr.ether_vlantag =
3182 vlan;
3183 rdata->fmp->m_flags |= M_VLANTAG;
3184 }
3185 m = rdata->fmp;
3186 rdata->fmp = NULL;
3187 rdata->lmp = NULL;
3188
3189 if (ifp->if_capenable & IFCAP_RSS) {
3190 pi = emx_rssinfo(m, &pi0, mrq,
3191 rss_hash, staterr);
3192 }
3193 #ifdef EMX_RSS_DEBUG
3194 rdata->rx_pkts++;
3195 #endif
3196 }
3197 } else {
3198 IFNET_STAT_INC(ifp, ierrors, 1);
3199 discard:
3200 emx_setup_rxdesc(current_desc, rx_buf);
3201 if (rdata->fmp != NULL) {
3202 m_freem(rdata->fmp);
3203 rdata->fmp = NULL;
3204 rdata->lmp = NULL;
3205 }
3206 m = NULL;
3207 }
3208
3209 if (m != NULL)
3210 ifp->if_input(ifp, m, pi, cpuid);
3211
3212 /* Advance our pointers to the next descriptor. */
3213 if (++i == rdata->num_rx_desc)
3214 i = 0;
3215
3216 current_desc = &rdata->rx_desc[i];
3217 staterr = le32toh(current_desc->rxd_staterr);
3218 }
3219 rdata->next_rx_desc_to_check = i;
3220
3221 /* Advance the E1000's Receive Queue "Tail Pointer". */
3222 if (--i < 0)
3223 i = rdata->num_rx_desc - 1;
3224 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i);
3225 }
3226
3227 static void
3228 emx_enable_intr(struct emx_softc *sc)
3229 {
3230 uint32_t ims_mask = IMS_ENABLE_MASK;
3231
3232 lwkt_serialize_handler_enable(&sc->main_serialize);
3233
3234 #if 0
3235 if (sc->hw.mac.type == e1000_82574) {
3236 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK);
3237 ims_mask |= EM_MSIX_MASK;
3238 }
3239 #endif
3240 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask);
3241 }
3242
3243 static void
3244 emx_disable_intr(struct emx_softc *sc)
3245 {
3246 if (sc->hw.mac.type == e1000_82574)
3247 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0);
3248 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
3249
3250 lwkt_serialize_handler_disable(&sc->main_serialize);
3251 }
3252
3253 /*
3254 * Bit of a misnomer: what this really means is
3255 * to enable OS management of the system...
aka 3256 * to disable special hardware management features 3257 */ 3258 static void 3259 emx_get_mgmt(struct emx_softc *sc) 3260 { 3261 /* A shared code workaround */ 3262 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3263 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3264 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3265 3266 /* disable hardware interception of ARP */ 3267 manc &= ~(E1000_MANC_ARP_EN); 3268 3269 /* enable receiving management packets to the host */ 3270 manc |= E1000_MANC_EN_MNG2HOST; 3271 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3272 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3273 manc2h |= E1000_MNG2HOST_PORT_623; 3274 manc2h |= E1000_MNG2HOST_PORT_664; 3275 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3276 3277 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3278 } 3279 } 3280 3281 /* 3282 * Give control back to hardware management 3283 * controller if there is one. 3284 */ 3285 static void 3286 emx_rel_mgmt(struct emx_softc *sc) 3287 { 3288 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3289 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3290 3291 /* re-enable hardware interception of ARP */ 3292 manc |= E1000_MANC_ARP_EN; 3293 manc &= ~E1000_MANC_EN_MNG2HOST; 3294 3295 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3296 } 3297 } 3298 3299 /* 3300 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3301 * For ASF and Pass Through versions of f/w this means that 3302 * the driver is loaded. For AMT version (only with 82573) 3303 * of the f/w this means that the network i/f is open. 3304 */ 3305 static void 3306 emx_get_hw_control(struct emx_softc *sc) 3307 { 3308 /* Let firmware know the driver has taken over */ 3309 if (sc->hw.mac.type == e1000_82573) { 3310 uint32_t swsm; 3311 3312 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3313 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3314 swsm | E1000_SWSM_DRV_LOAD); 3315 } else { 3316 uint32_t ctrl_ext; 3317 3318 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3319 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3320 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3321 } 3322 sc->flags |= EMX_FLAG_HW_CTRL; 3323 } 3324 3325 /* 3326 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3327 * For ASF and Pass Through versions of f/w this means that the 3328 * driver is no longer loaded. For AMT version (only with 82573) 3329 * of the f/w this means that the network i/f is closed. 
3330 */
3331 static void
3332 emx_rel_hw_control(struct emx_softc *sc)
3333 {
3334 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
3335 return;
3336 sc->flags &= ~EMX_FLAG_HW_CTRL;
3337
3338 /* Let firmware take over control of h/w */
3339 if (sc->hw.mac.type == e1000_82573) {
3340 uint32_t swsm;
3341
3342 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3343 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3344 swsm & ~E1000_SWSM_DRV_LOAD);
3345 } else {
3346 uint32_t ctrl_ext;
3347
3348 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3349 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3350 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3351 }
3352 }
3353
3354 static int
3355 emx_is_valid_eaddr(const uint8_t *addr)
3356 {
3357 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3358
3359 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3360 return (FALSE);
3361
3362 return (TRUE);
3363 }
3364
3365 /*
3366 * Enable PCI Wake On Lan capability
3367 */
3368 void
3369 emx_enable_wol(device_t dev)
3370 {
3371 uint16_t cap, status;
3372 uint8_t id;
3373
3374 /* First find the capabilities pointer */
3375 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3376
3377 /* Read the PM Capabilities */
3378 id = pci_read_config(dev, cap, 1);
3379 if (id != PCIY_PMG) /* Something wrong */
3380 return;
3381
3382 /*
3383 * OK, we have the power capabilities,
3384 * so now get the status register
3385 */
3386 cap += PCIR_POWER_STATUS;
3387 status = pci_read_config(dev, cap, 2);
3388 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3389 pci_write_config(dev, cap, status, 2);
3390 }
3391
3392 static void
3393 emx_update_stats(struct emx_softc *sc)
3394 {
3395 struct ifnet *ifp = &sc->arpcom.ac_if;
3396
3397 if (sc->hw.phy.media_type == e1000_media_type_copper ||
3398 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3399 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
3400 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
3401 }
3402 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
3403 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
3404 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
3405 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
3406
3407 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
3408 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
3409 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
3410 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
3411 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
3412 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
3413 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
3414 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
3415 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
3416 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
3417 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
3418 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
3419 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
3420 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
3421 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
3422 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
3423 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
3424 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
3425 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
3426 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
3427
3428 /* For the 64-bit byte counters the low dword must be read first.
*/ 3429 /* Both registers clear on the read of the high dword */ 3430 3431 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3432 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3433 3434 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3435 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3436 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3437 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3438 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3439 3440 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3441 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3442 3443 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3444 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3445 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3446 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3447 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3448 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3449 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3450 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3451 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3452 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3453 3454 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3455 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3456 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3457 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3458 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3459 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3460 3461 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3462 3463 /* Rx Errors */ 3464 IFNET_STAT_SET(ifp, ierrors, 3465 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3466 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3467 3468 /* Tx Errors */ 3469 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3470 } 3471 3472 static void 3473 emx_print_debug_info(struct emx_softc *sc) 3474 { 3475 device_t dev = sc->dev; 3476 uint8_t *hw_addr = sc->hw.hw_addr; 3477 int i; 3478 3479 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3480 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3481 E1000_READ_REG(&sc->hw, E1000_CTRL), 3482 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3483 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3484 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3485 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3486 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3487 sc->hw.fc.high_water, sc->hw.fc.low_water); 3488 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3489 E1000_READ_REG(&sc->hw, E1000_TIDV), 3490 E1000_READ_REG(&sc->hw, E1000_TADV)); 3491 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3492 E1000_READ_REG(&sc->hw, E1000_RDTR), 3493 E1000_READ_REG(&sc->hw, E1000_RADV)); 3494 3495 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3496 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3497 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3498 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3499 } 3500 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3501 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3502 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3503 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3504 } 3505 3506 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3507 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3508 sc->tx_data[i].num_tx_desc_avail); 3509 
device_printf(dev, "TX %d TSO segments = %lu\n", i, 3510 sc->tx_data[i].tso_segments); 3511 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3512 sc->tx_data[i].tso_ctx_reused); 3513 } 3514 } 3515 3516 static void 3517 emx_print_hw_stats(struct emx_softc *sc) 3518 { 3519 device_t dev = sc->dev; 3520 3521 device_printf(dev, "Excessive collisions = %lld\n", 3522 (long long)sc->stats.ecol); 3523 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3524 device_printf(dev, "Symbol errors = %lld\n", 3525 (long long)sc->stats.symerrs); 3526 #endif 3527 device_printf(dev, "Sequence errors = %lld\n", 3528 (long long)sc->stats.sec); 3529 device_printf(dev, "Defer count = %lld\n", 3530 (long long)sc->stats.dc); 3531 device_printf(dev, "Missed Packets = %lld\n", 3532 (long long)sc->stats.mpc); 3533 device_printf(dev, "Receive No Buffers = %lld\n", 3534 (long long)sc->stats.rnbc); 3535 /* RLEC is inaccurate on some hardware, calculate our own. */ 3536 device_printf(dev, "Receive Length Errors = %lld\n", 3537 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3538 device_printf(dev, "Receive errors = %lld\n", 3539 (long long)sc->stats.rxerrc); 3540 device_printf(dev, "Crc errors = %lld\n", 3541 (long long)sc->stats.crcerrs); 3542 device_printf(dev, "Alignment errors = %lld\n", 3543 (long long)sc->stats.algnerrc); 3544 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3545 (long long)sc->stats.cexterr); 3546 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3547 device_printf(dev, "XON Rcvd = %lld\n", 3548 (long long)sc->stats.xonrxc); 3549 device_printf(dev, "XON Xmtd = %lld\n", 3550 (long long)sc->stats.xontxc); 3551 device_printf(dev, "XOFF Rcvd = %lld\n", 3552 (long long)sc->stats.xoffrxc); 3553 device_printf(dev, "XOFF Xmtd = %lld\n", 3554 (long long)sc->stats.xofftxc); 3555 device_printf(dev, "Good Packets Rcvd = %lld\n", 3556 (long long)sc->stats.gprc); 3557 device_printf(dev, "Good Packets Xmtd = %lld\n", 3558 (long long)sc->stats.gptc); 3559 } 3560 3561 static void 3562 emx_print_nvm_info(struct emx_softc *sc) 3563 { 3564 uint16_t eeprom_data; 3565 int i, j, row = 0; 3566 3567 /* Its a bit crude, but it gets the job done */ 3568 kprintf("\nInterface EEPROM Dump:\n"); 3569 kprintf("Offset\n0x0000 "); 3570 for (i = 0, j = 0; i < 32; i++, j++) { 3571 if (j == 8) { /* Make the offset block */ 3572 j = 0; ++row; 3573 kprintf("\n0x00%x0 ",row); 3574 } 3575 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3576 kprintf("%04x ", eeprom_data); 3577 } 3578 kprintf("\n"); 3579 } 3580 3581 static int 3582 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3583 { 3584 struct emx_softc *sc; 3585 struct ifnet *ifp; 3586 int error, result; 3587 3588 result = -1; 3589 error = sysctl_handle_int(oidp, &result, 0, req); 3590 if (error || !req->newptr) 3591 return (error); 3592 3593 sc = (struct emx_softc *)arg1; 3594 ifp = &sc->arpcom.ac_if; 3595 3596 ifnet_serialize_all(ifp); 3597 3598 if (result == 1) 3599 emx_print_debug_info(sc); 3600 3601 /* 3602 * This value will cause a hex dump of the 3603 * first 32 16-bit words of the EEPROM to 3604 * the screen. 
3605 */ 3606 if (result == 2) 3607 emx_print_nvm_info(sc); 3608 3609 ifnet_deserialize_all(ifp); 3610 3611 return (error); 3612 } 3613 3614 static int 3615 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3616 { 3617 int error, result; 3618 3619 result = -1; 3620 error = sysctl_handle_int(oidp, &result, 0, req); 3621 if (error || !req->newptr) 3622 return (error); 3623 3624 if (result == 1) { 3625 struct emx_softc *sc = (struct emx_softc *)arg1; 3626 struct ifnet *ifp = &sc->arpcom.ac_if; 3627 3628 ifnet_serialize_all(ifp); 3629 emx_print_hw_stats(sc); 3630 ifnet_deserialize_all(ifp); 3631 } 3632 return (error); 3633 } 3634 3635 static void 3636 emx_add_sysctl(struct emx_softc *sc) 3637 { 3638 struct sysctl_ctx_list *ctx; 3639 struct sysctl_oid *tree; 3640 #if defined(EMX_RSS_DEBUG) || defined(EMX_TSS_DEBUG) 3641 char pkt_desc[32]; 3642 int i; 3643 #endif 3644 3645 ctx = device_get_sysctl_ctx(sc->dev); 3646 tree = device_get_sysctl_tree(sc->dev); 3647 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3648 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3649 emx_sysctl_debug_info, "I", "Debug Information"); 3650 3651 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3652 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3653 emx_sysctl_stats, "I", "Statistics"); 3654 3655 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3656 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0, 3657 "# of RX descs"); 3658 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3659 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0, 3660 "# of TX descs"); 3661 3662 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3663 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3664 emx_sysctl_int_throttle, "I", "interrupt throttling rate"); 3665 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3666 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3667 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt"); 3668 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3669 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3670 emx_sysctl_tx_wreg_nsegs, "I", 3671 "# segments sent before write to hardware register"); 3672 3673 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3674 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0, 3675 "# of RX rings"); 3676 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3677 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0, 3678 "# of TX rings"); 3679 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3680 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0, 3681 "# of TX rings used"); 3682 3683 #ifdef IFPOLL_ENABLE 3684 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3685 OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, 3686 sc, 0, emx_sysctl_npoll_rxoff, "I", 3687 "NPOLLING RX cpu offset"); 3688 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3689 OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, 3690 sc, 0, emx_sysctl_npoll_txoff, "I", 3691 "NPOLLING TX cpu offset"); 3692 #endif 3693 3694 #ifdef EMX_RSS_DEBUG 3695 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3696 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3697 0, "RSS debug level"); 3698 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3699 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i); 3700 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3701 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts, 3702 "RXed packets"); 3703 } 3704 #endif 3705 #ifdef EMX_TSS_DEBUG 3706 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3707 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i); 3708 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3709 pkt_desc, 
CTLFLAG_RW, &sc->tx_data[i].tx_pkts, 3710 "TXed packets"); 3711 } 3712 #endif 3713 } 3714 3715 static int 3716 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3717 { 3718 struct emx_softc *sc = (void *)arg1; 3719 struct ifnet *ifp = &sc->arpcom.ac_if; 3720 int error, throttle; 3721 3722 throttle = sc->int_throttle_ceil; 3723 error = sysctl_handle_int(oidp, &throttle, 0, req); 3724 if (error || req->newptr == NULL) 3725 return error; 3726 if (throttle < 0 || throttle > 1000000000 / 256) 3727 return EINVAL; 3728 3729 if (throttle) { 3730 /* 3731 * Set the interrupt throttling rate in 256ns increments, 3732 * recalculate sysctl value assignment to get exact frequency. 3733 */ 3734 throttle = 1000000000 / 256 / throttle; 3735 3736 /* Upper 16bits of ITR is reserved and should be zero */ 3737 if (throttle & 0xffff0000) 3738 return EINVAL; 3739 } 3740 3741 ifnet_serialize_all(ifp); 3742 3743 if (throttle) 3744 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3745 else 3746 sc->int_throttle_ceil = 0; 3747 3748 if (ifp->if_flags & IFF_RUNNING) 3749 emx_set_itr(sc, throttle); 3750 3751 ifnet_deserialize_all(ifp); 3752 3753 if (bootverbose) { 3754 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3755 sc->int_throttle_ceil); 3756 } 3757 return 0; 3758 } 3759 3760 static int 3761 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3762 { 3763 struct emx_softc *sc = (void *)arg1; 3764 struct ifnet *ifp = &sc->arpcom.ac_if; 3765 struct emx_txdata *tdata = &sc->tx_data[0]; 3766 int error, segs; 3767 3768 segs = tdata->tx_intr_nsegs; 3769 error = sysctl_handle_int(oidp, &segs, 0, req); 3770 if (error || req->newptr == NULL) 3771 return error; 3772 if (segs <= 0) 3773 return EINVAL; 3774 3775 ifnet_serialize_all(ifp); 3776 3777 /* 3778 * Don't allow tx_intr_nsegs to become: 3779 * o Less the oact_tx_desc 3780 * o Too large that no TX desc will cause TX interrupt to 3781 * be generated (OACTIVE will never recover) 3782 * o Too small that will cause tx_dd[] overflow 3783 */ 3784 if (segs < tdata->oact_tx_desc || 3785 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 3786 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 3787 error = EINVAL; 3788 } else { 3789 int i; 3790 3791 error = 0; 3792 for (i = 0; i < sc->tx_ring_cnt; ++i) 3793 sc->tx_data[i].tx_intr_nsegs = segs; 3794 } 3795 3796 ifnet_deserialize_all(ifp); 3797 3798 return error; 3799 } 3800 3801 static int 3802 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3803 { 3804 struct emx_softc *sc = (void *)arg1; 3805 struct ifnet *ifp = &sc->arpcom.ac_if; 3806 int error, nsegs, i; 3807 3808 nsegs = sc->tx_data[0].tx_wreg_nsegs; 3809 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3810 if (error || req->newptr == NULL) 3811 return error; 3812 3813 ifnet_serialize_all(ifp); 3814 for (i = 0; i < sc->tx_ring_cnt; ++i) 3815 sc->tx_data[i].tx_wreg_nsegs =nsegs; 3816 ifnet_deserialize_all(ifp); 3817 3818 return 0; 3819 } 3820 3821 #ifdef IFPOLL_ENABLE 3822 3823 static int 3824 emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 3825 { 3826 struct emx_softc *sc = (void *)arg1; 3827 struct ifnet *ifp = &sc->arpcom.ac_if; 3828 int error, off; 3829 3830 off = sc->rx_npoll_off; 3831 error = sysctl_handle_int(oidp, &off, 0, req); 3832 if (error || req->newptr == NULL) 3833 return error; 3834 if (off < 0) 3835 return EINVAL; 3836 3837 ifnet_serialize_all(ifp); 3838 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) { 3839 error = EINVAL; 3840 } else { 3841 error = 0; 3842 sc->rx_npoll_off = off; 3843 } 3844 ifnet_deserialize_all(ifp); 3845 3846 return error; 3847 } 3848 3849 static 

static int
emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->tx_npoll_off;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->tx_npoll_off = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif /* IFPOLL_ENABLE */

static int
emx_dma_alloc(struct emx_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_dtag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate transmit descriptor rings and buffers
	 */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		error = emx_create_tx_ring(&sc->tx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup transmit structures\n");
			return error;
		}
	}

	/*
	 * Allocate receive descriptor rings and buffers
	 */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		error = emx_create_rx_ring(&sc->rx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup receive structures\n");
			return error;
		}
	}
	return 0;
}

static void
emx_dma_free(struct emx_softc *sc)
{
	int i;

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		emx_destroy_tx_ring(&sc->tx_data[i],
		    sc->tx_data[i].num_tx_desc);
	}

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		emx_destroy_rx_ring(&sc->rx_data[i],
		    sc->rx_data[i].num_rx_desc);
	}

	/* Free top level busdma tag */
	if (sc->parent_dtag != NULL)
		bus_dma_tag_destroy(sc->parent_dtag);
}

static void
emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
}

static int
emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_serialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
}

static void
emx_deserialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
}

#ifdef INVARIANTS

static void
emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
	    slz, serialized);
}

#endif /* INVARIANTS */

#ifdef IFPOLL_ENABLE
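
/*
 * Illustrative ring-to-CPU mapping (numbers assumed, not from real
 * hardware): with tx_npoll_off == 2 and two TX rings in use on a
 * system where ncpus2 >= 4, emx_npoll() below registers TX ring 0
 * on CPU 2 and TX ring 1 on CPU 3 (idx = i + off); RX rings are
 * spread the same way starting at rx_npoll_off.
 */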

static void
emx_npoll_status(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);
		callout_reset(&sc->timer, hz, emx_timer, sc);
	}
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct emx_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->tx_serialize);

	emx_txeof(tdata);
	if (!ifsq_is_empty(tdata->ifsq))
		ifsq_devstart(tdata->ifsq);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct emx_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->rx_serialize);

	emx_rxeof(rdata, cycle);
}

static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct emx_softc *sc = ifp->if_softc;
	int i, txr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int off;

		info->ifpi_status.status_func = emx_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = emx_get_txring_inuse(sc, TRUE);
		off = sc->tx_npoll_off;
		for (i = 0; i < txr_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];
			int idx = i + off;

			KKASSERT(idx < ncpus2);
			info->ifpi_tx[idx].poll_func = emx_npoll_tx;
			info->ifpi_tx[idx].arg = tdata;
			info->ifpi_tx[idx].serializer = &tdata->tx_serialize;
			ifsq_set_cpuid(tdata->ifsq, idx);
		}

		off = sc->rx_npoll_off;
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct emx_rxdata *rdata = &sc->rx_data[i];
			int idx = i + off;

			KKASSERT(idx < ncpus2);
			info->ifpi_rx[idx].poll_func = emx_npoll_rx;
			info->ifpi_rx[idx].arg = rdata;
			info->ifpi_rx[idx].serializer = &rdata->rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING) {
			if (txr_cnt == sc->tx_ring_inuse)
				emx_disable_intr(sc);
			else
				emx_init(sc);
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			ifsq_set_cpuid(tdata->ifsq,
			    rman_get_cpuid(sc->intr_res));
		}

		if (ifp->if_flags & IFF_RUNNING) {
			txr_cnt = emx_get_txring_inuse(sc, FALSE);
			if (txr_cnt == sc->tx_ring_inuse)
				emx_enable_intr(sc);
			else
				emx_init(sc);
		}
	}
}

#endif /* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
	E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
	if (sc->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When using MSI-X interrupts we need to
		 * throttle using the EITR registers as well
		 */
		for (i = 0; i < 4; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
	}
}

/*
 * Disable ASPM L0s (and on some chips L1) to work around
 * chip errata, e.g. 82574L errata #20.
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = sc->dev;

	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8  disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
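		/*
		 * Both ASPM states selected here are masked out of the
		 * PCIe link control register by the read-modify-write
		 * sequence at the bottom of this function.
		 */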
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
		/*
		 * 82574 specification update errata #20
		 *
		 * There is no need to disable L1
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}

static int
emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
{
	int iphlen, hoff, thoff, ex = 0;
	struct mbuf *m;
	struct ip *ip;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
		ex = 4;

	if (m->m_len < hoff + iphlen + thoff + ex) {
		m = m_pullup(m, hoff + iphlen + thoff + ex);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	ip->ip_len = 0;

	return 0;
}

static int
emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int hoff, iphlen, thoff, hlen;
	int mss, pktlen, curr_txd;

#ifdef EMX_TSO_DEBUG
	tdata->tso_segments++;
#endif

	iphlen = mp->m_pkthdr.csum_iphlen;
	thoff = mp->m_pkthdr.csum_thlen;
	hoff = mp->m_pkthdr.csum_lhlen;
	mss = mp->m_pkthdr.tso_segsz;
	pktlen = mp->m_pkthdr.len;

	if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
	    tdata->csum_flags == CSUM_TSO &&
	    tdata->csum_iphlen == iphlen &&
	    tdata->csum_lhlen == hoff &&
	    tdata->csum_thlen == thoff &&
	    tdata->csum_mss == mss &&
	    tdata->csum_pktlen == pktlen) {
		*txd_upper = tdata->csum_txd_upper;
		*txd_lower = tdata->csum_txd_lower;
#ifdef EMX_TSO_DEBUG
		tdata->tso_ctx_reused++;
#endif
		return 0;
	}
	hlen = hoff + iphlen + thoff;

	/*
	 * Setup a new TSO context.
	 */

	curr_txd = tdata->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
	    E1000_TXD_DTYP_D |			/* Data descr type */
	    E1000_TXD_CMD_TSE;			/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

	/*
	 * Start offset for header checksum calculation.
	 * End offset for header checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->lower_setup.ip_fields.ipcss = hoff;
	TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
	TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);
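
	/*
	 * Illustrative numbers for a common TCP/IPv4-over-Ethernet
	 * frame (assumed, not read from any particular NIC):
	 * hoff = 14, iphlen = 20 and thoff = 20, so hlen = 54;
	 * ipcss/ipcse above then cover bytes 14..33 and the IP
	 * checksum lands at offset 14 + 10 = 24, since
	 * offsetof(struct ip, ip_sum) == 10.
	 */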

	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    hoff + iphlen + offsetof(struct tcphdr, th_sum);

	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mss);
	TXD->tcp_seg_setup.fields.hdr_len = hlen;
	TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
	    E1000_TXD_CMD_DEXT |	/* Extended descr */
	    E1000_TXD_CMD_TSE |		/* TSE context */
	    E1000_TXD_CMD_IP |		/* Do IP csum */
	    E1000_TXD_CMD_TCP |		/* Do TCP checksum */
	    (pktlen - hlen));		/* Total len */

	/* Save the information for this TSO context */
	tdata->csum_flags = CSUM_TSO;
	tdata->csum_lhlen = hoff;
	tdata->csum_iphlen = iphlen;
	tdata->csum_thlen = thoff;
	tdata->csum_mss = mss;
	tdata->csum_pktlen = pktlen;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}

static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
	if (polling)
		return sc->tx_ring_cnt;
	else
		return 1;
}
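
/*
 * Hypothetical tuning sketch for the sysctl handlers in this file
 * (the node names and values are assumed for illustration; check the
 * names actually registered by the driver on a given system):
 *
 *   # request ~6000 interrupts/sec; emx_sysctl_int_throttle() rounds
 *   # this to the nearest value representable in 256ns ITR units
 *   sysctl dev.emx.0.int_throttle_ceil=6000
 *
 *   # start RX polling at CPU 2; emx_sysctl_npoll_rxoff() rejects
 *   # offsets that are not a multiple of rx_ring_cnt
 *   sysctl dev.emx.0.npoll_rxoff=2
 */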