1 /* 2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved. 3 * 4 * Copyright (c) 2001-2008, Intel Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * 34 * Copyright (c) 2005 The DragonFly Project. All rights reserved. 35 * 36 * This code is derived from software contributed to The DragonFly Project 37 * by Matthew Dillon <dillon@backplane.com> 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in 47 * the documentation and/or other materials provided with the 48 * distribution. 49 * 3. Neither the name of The DragonFly Project nor the names of its 50 * contributors may be used to endorse or promote products derived 51 * from this software without specific, prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 */ 66 67 #include "opt_ifpoll.h" 68 #include "opt_emx.h" 69 70 #include <sys/param.h> 71 #include <sys/bus.h> 72 #include <sys/endian.h> 73 #include <sys/interrupt.h> 74 #include <sys/kernel.h> 75 #include <sys/ktr.h> 76 #include <sys/malloc.h> 77 #include <sys/mbuf.h> 78 #include <sys/proc.h> 79 #include <sys/rman.h> 80 #include <sys/serialize.h> 81 #include <sys/serialize2.h> 82 #include <sys/socket.h> 83 #include <sys/sockio.h> 84 #include <sys/sysctl.h> 85 #include <sys/systm.h> 86 87 #include <net/bpf.h> 88 #include <net/ethernet.h> 89 #include <net/if.h> 90 #include <net/if_arp.h> 91 #include <net/if_dl.h> 92 #include <net/if_media.h> 93 #include <net/ifq_var.h> 94 #include <net/toeplitz.h> 95 #include <net/toeplitz2.h> 96 #include <net/vlan/if_vlan_var.h> 97 #include <net/vlan/if_vlan_ether.h> 98 #include <net/if_poll.h> 99 100 #include <netinet/in_systm.h> 101 #include <netinet/in.h> 102 #include <netinet/ip.h> 103 #include <netinet/tcp.h> 104 #include <netinet/udp.h> 105 106 #include <bus/pci/pcivar.h> 107 #include <bus/pci/pcireg.h> 108 109 #include <dev/netif/ig_hal/e1000_api.h> 110 #include <dev/netif/ig_hal/e1000_82571.h> 111 #include <dev/netif/emx/if_emx.h> 112 113 #define DEBUG_HW 0 114 115 #ifdef EMX_RSS_DEBUG 116 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \ 117 do { \ 118 if (sc->rss_debug >= lvl) \ 119 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 120 } while (0) 121 #else /* !EMX_RSS_DEBUG */ 122 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 123 #endif /* EMX_RSS_DEBUG */ 124 125 #define EMX_NAME "Intel(R) PRO/1000 " 126 127 #define EMX_DEVICE(id) \ 128 { EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id } 129 #define EMX_DEVICE_NULL { 0, 0, NULL } 130 131 static const struct emx_device { 132 uint16_t vid; 133 uint16_t did; 134 const char *desc; 135 } emx_devices[] = { 136 EMX_DEVICE(82571EB_COPPER), 137 EMX_DEVICE(82571EB_FIBER), 138 EMX_DEVICE(82571EB_SERDES), 139 EMX_DEVICE(82571EB_SERDES_DUAL), 140 EMX_DEVICE(82571EB_SERDES_QUAD), 141 EMX_DEVICE(82571EB_QUAD_COPPER), 142 EMX_DEVICE(82571EB_QUAD_COPPER_BP), 143 EMX_DEVICE(82571EB_QUAD_COPPER_LP), 144 EMX_DEVICE(82571EB_QUAD_FIBER), 145 EMX_DEVICE(82571PT_QUAD_COPPER), 146 147 EMX_DEVICE(82572EI_COPPER), 148 EMX_DEVICE(82572EI_FIBER), 149 EMX_DEVICE(82572EI_SERDES), 150 EMX_DEVICE(82572EI), 151 152 EMX_DEVICE(82573E), 153 EMX_DEVICE(82573E_IAMT), 154 EMX_DEVICE(82573L), 155 156 EMX_DEVICE(80003ES2LAN_COPPER_SPT), 157 EMX_DEVICE(80003ES2LAN_SERDES_SPT), 158 EMX_DEVICE(80003ES2LAN_COPPER_DPT), 159 EMX_DEVICE(80003ES2LAN_SERDES_DPT), 160 161 EMX_DEVICE(82574L), 162 EMX_DEVICE(82574LA), 163 164 EMX_DEVICE(PCH_LPT_I217_LM), 165 EMX_DEVICE(PCH_LPT_I217_V), 166 EMX_DEVICE(PCH_LPTLP_I218_LM), 167 EMX_DEVICE(PCH_LPTLP_I218_V), 168 EMX_DEVICE(PCH_I218_LM2), 169 EMX_DEVICE(PCH_I218_V2), 170 EMX_DEVICE(PCH_I218_LM3), 171 EMX_DEVICE(PCH_I218_V3), 172 173 /* required last entry */ 174 EMX_DEVICE_NULL 175 }; 176 177 static int emx_probe(device_t); 178 static int emx_attach(device_t); 179 static int emx_detach(device_t); 180 static int emx_shutdown(device_t); 181 static int emx_suspend(device_t); 182 static int emx_resume(device_t); 183 184 static void emx_init(void *); 185 static void emx_stop(struct emx_softc *); 186 static int emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 187 static void emx_start(struct ifnet *, struct ifaltq_subque *); 188 #ifdef IFPOLL_ENABLE 189 static void emx_npoll(struct ifnet *, struct ifpoll_info *); 190 static void emx_npoll_status(struct ifnet *); 191 static void emx_npoll_tx(struct ifnet *, void *, int); 192 static void emx_npoll_rx(struct ifnet *, void *, int); 193 #endif 194 static void emx_watchdog(struct ifaltq_subque *); 195 static void emx_media_status(struct ifnet *, struct ifmediareq *); 196 static int emx_media_change(struct ifnet *); 197 static void emx_timer(void *); 198 static void emx_serialize(struct ifnet *, enum ifnet_serialize); 199 static void emx_deserialize(struct ifnet *, enum ifnet_serialize); 200 static int emx_tryserialize(struct ifnet *, enum ifnet_serialize); 201 #ifdef INVARIANTS 202 static void emx_serialize_assert(struct ifnet *, enum ifnet_serialize, 203 boolean_t); 204 #endif 205 206 static void emx_intr(void *); 207 static void emx_intr_mask(void *); 208 static void emx_intr_body(struct emx_softc *, boolean_t); 209 static void emx_rxeof(struct emx_rxdata *, int); 210 static void emx_txeof(struct emx_txdata *); 211 static void emx_tx_collect(struct emx_txdata *); 212 static void emx_tx_purge(struct emx_softc *); 213 static void emx_enable_intr(struct emx_softc *); 214 static void emx_disable_intr(struct emx_softc *); 215 216 static int emx_dma_alloc(struct emx_softc *); 217 static void emx_dma_free(struct emx_softc *); 218 static void emx_init_tx_ring(struct emx_txdata *); 219 static int emx_init_rx_ring(struct emx_rxdata *); 220 static void emx_free_tx_ring(struct emx_txdata *); 221 static void emx_free_rx_ring(struct emx_rxdata *); 222 static int emx_create_tx_ring(struct emx_txdata *); 223 static int emx_create_rx_ring(struct emx_rxdata *); 224 static void emx_destroy_tx_ring(struct emx_txdata *, int); 225 static void emx_destroy_rx_ring(struct emx_rxdata *, int); 226 static int emx_newbuf(struct emx_rxdata *, int, int); 227 static int emx_encap(struct emx_txdata *, struct mbuf **, int *, int *); 228 static int emx_txcsum(struct emx_txdata *, struct mbuf *, 229 uint32_t *, uint32_t *); 230 static int emx_tso_pullup(struct emx_txdata *, struct mbuf **); 231 static int emx_tso_setup(struct emx_txdata *, struct mbuf *, 232 uint32_t *, uint32_t *); 233 static int emx_get_txring_inuse(const struct emx_softc *, boolean_t); 234 235 static int emx_is_valid_eaddr(const uint8_t *); 236 static int emx_reset(struct emx_softc *); 237 static void emx_setup_ifp(struct emx_softc *); 238 static void emx_init_tx_unit(struct emx_softc *); 239 static void emx_init_rx_unit(struct emx_softc *); 240 static void emx_update_stats(struct emx_softc *); 241 static void emx_set_promisc(struct emx_softc *); 242 static void emx_disable_promisc(struct emx_softc *); 243 static void emx_set_multi(struct emx_softc *); 244 static void emx_update_link_status(struct emx_softc *); 245 static void emx_smartspeed(struct emx_softc *); 246 static void emx_set_itr(struct emx_softc *, uint32_t); 247 static void emx_disable_aspm(struct emx_softc *); 248 249 static void emx_print_debug_info(struct emx_softc *); 250 static void emx_print_nvm_info(struct emx_softc *); 251 static void emx_print_hw_stats(struct emx_softc *); 252 253 static int emx_sysctl_stats(SYSCTL_HANDLER_ARGS); 254 static int emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 255 static int emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS); 256 static int emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS); 257 static int emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS); 258 #ifdef IFPOLL_ENABLE 259 static int emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS); 260 static int emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS); 261 #endif 262 static void emx_add_sysctl(struct emx_softc *); 263 264 static void emx_serialize_skipmain(struct emx_softc *); 265 static void emx_deserialize_skipmain(struct emx_softc *); 266 267 /* Management and WOL Support */ 268 static void emx_get_mgmt(struct emx_softc *); 269 static void emx_rel_mgmt(struct emx_softc *); 270 static void emx_get_hw_control(struct emx_softc *); 271 static void emx_rel_hw_control(struct emx_softc *); 272 static void emx_enable_wol(device_t); 273 274 static device_method_t emx_methods[] = { 275 /* Device interface */ 276 DEVMETHOD(device_probe, emx_probe), 277 DEVMETHOD(device_attach, emx_attach), 278 DEVMETHOD(device_detach, emx_detach), 279 DEVMETHOD(device_shutdown, emx_shutdown), 280 DEVMETHOD(device_suspend, emx_suspend), 281 DEVMETHOD(device_resume, emx_resume), 282 DEVMETHOD_END 283 }; 284 285 static driver_t emx_driver = { 286 "emx", 287 emx_methods, 288 sizeof(struct emx_softc), 289 }; 290 291 static devclass_t emx_devclass; 292 293 DECLARE_DUMMY_MODULE(if_emx); 294 MODULE_DEPEND(emx, ig_hal, 1, 1, 1); 295 DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL); 296 297 /* 298 * Tunables 299 */ 300 static int emx_int_throttle_ceil = EMX_DEFAULT_ITR; 301 static int emx_rxd = EMX_DEFAULT_RXD; 302 static int emx_txd = EMX_DEFAULT_TXD; 303 static int emx_smart_pwr_down = 0; 304 static int emx_rxr = 0; 305 static int emx_txr = 1; 306 307 /* Controls whether promiscuous also shows bad packets */ 308 static int emx_debug_sbp = 0; 309 310 static int emx_82573_workaround = 1; 311 static int emx_msi_enable = 1; 312 313 TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil); 314 TUNABLE_INT("hw.emx.rxd", &emx_rxd); 315 TUNABLE_INT("hw.emx.rxr", &emx_rxr); 316 TUNABLE_INT("hw.emx.txd", &emx_txd); 317 TUNABLE_INT("hw.emx.txr", &emx_txr); 318 TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down); 319 TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp); 320 TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround); 321 TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable); 322 323 /* Global used in WOL setup with multiport cards */ 324 static int emx_global_quad_port_a = 0; 325 326 /* Set this to one to display debug statistics */ 327 static int emx_display_debug_stats = 0; 328 329 #if !defined(KTR_IF_EMX) 330 #define KTR_IF_EMX KTR_ALL 331 #endif 332 KTR_INFO_MASTER(if_emx); 333 KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin"); 334 KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end"); 335 KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet"); 336 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet"); 337 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean"); 338 #define logif(name) KTR_LOG(if_emx_ ## name) 339 340 static __inline void 341 emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf) 342 { 343 rxd->rxd_bufaddr = htole64(rxbuf->paddr); 344 /* DD bit must be cleared */ 345 rxd->rxd_staterr = 0; 346 } 347 348 static __inline void 349 emx_rxcsum(uint32_t staterr, struct mbuf *mp) 350 { 351 /* Ignore Checksum bit is set */ 352 if (staterr & E1000_RXD_STAT_IXSM) 353 return; 354 355 if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == 356 E1000_RXD_STAT_IPCS) 357 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; 358 359 if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 360 E1000_RXD_STAT_TCPCS) { 361 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 362 CSUM_PSEUDO_HDR | 363 CSUM_FRAG_NOT_CHECKED; 364 mp->m_pkthdr.csum_data = htons(0xffff); 365 } 366 } 367 368 static __inline struct pktinfo * 369 emx_rssinfo(struct mbuf *m, struct pktinfo *pi, 370 uint32_t mrq, uint32_t hash, uint32_t staterr) 371 { 372 switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) { 373 case EMX_RXDMRQ_IPV4_TCP: 374 pi->pi_netisr = NETISR_IP; 375 pi->pi_flags = 0; 376 pi->pi_l3proto = IPPROTO_TCP; 377 break; 378 379 case EMX_RXDMRQ_IPV6_TCP: 380 pi->pi_netisr = NETISR_IPV6; 381 pi->pi_flags = 0; 382 pi->pi_l3proto = IPPROTO_TCP; 383 break; 384 385 case EMX_RXDMRQ_IPV4: 386 if (staterr & E1000_RXD_STAT_IXSM) 387 return NULL; 388 389 if ((staterr & 390 (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 391 E1000_RXD_STAT_TCPCS) { 392 pi->pi_netisr = NETISR_IP; 393 pi->pi_flags = 0; 394 pi->pi_l3proto = IPPROTO_UDP; 395 break; 396 } 397 /* FALL THROUGH */ 398 default: 399 return NULL; 400 } 401 402 m->m_flags |= M_HASH; 403 m->m_pkthdr.hash = toeplitz_hash(hash); 404 return pi; 405 } 406 407 static int 408 emx_probe(device_t dev) 409 { 410 const struct emx_device *d; 411 uint16_t vid, did; 412 413 vid = pci_get_vendor(dev); 414 did = pci_get_device(dev); 415 416 for (d = emx_devices; d->desc != NULL; ++d) { 417 if (vid == d->vid && did == d->did) { 418 device_set_desc(dev, d->desc); 419 device_set_async_attach(dev, TRUE); 420 return 0; 421 } 422 } 423 return ENXIO; 424 } 425 426 static int 427 emx_attach(device_t dev) 428 { 429 struct emx_softc *sc = device_get_softc(dev); 430 int error = 0, i, throttle, msi_enable, tx_ring_max; 431 u_int intr_flags; 432 uint16_t eeprom_data, device_id, apme_mask; 433 driver_intr_t *intr_func; 434 #ifdef IFPOLL_ENABLE 435 int offset, offset_def; 436 #endif 437 438 /* 439 * Setup RX rings 440 */ 441 for (i = 0; i < EMX_NRX_RING; ++i) { 442 sc->rx_data[i].sc = sc; 443 sc->rx_data[i].idx = i; 444 } 445 446 /* 447 * Setup TX ring 448 */ 449 for (i = 0; i < EMX_NTX_RING; ++i) { 450 sc->tx_data[i].sc = sc; 451 sc->tx_data[i].idx = i; 452 } 453 454 /* 455 * Initialize serializers 456 */ 457 lwkt_serialize_init(&sc->main_serialize); 458 for (i = 0; i < EMX_NTX_RING; ++i) 459 lwkt_serialize_init(&sc->tx_data[i].tx_serialize); 460 for (i = 0; i < EMX_NRX_RING; ++i) 461 lwkt_serialize_init(&sc->rx_data[i].rx_serialize); 462 463 /* 464 * Initialize serializer array 465 */ 466 i = 0; 467 468 KKASSERT(i < EMX_NSERIALIZE); 469 sc->serializes[i++] = &sc->main_serialize; 470 471 KKASSERT(i < EMX_NSERIALIZE); 472 sc->serializes[i++] = &sc->tx_data[0].tx_serialize; 473 KKASSERT(i < EMX_NSERIALIZE); 474 sc->serializes[i++] = &sc->tx_data[1].tx_serialize; 475 476 KKASSERT(i < EMX_NSERIALIZE); 477 sc->serializes[i++] = &sc->rx_data[0].rx_serialize; 478 KKASSERT(i < EMX_NSERIALIZE); 479 sc->serializes[i++] = &sc->rx_data[1].rx_serialize; 480 481 KKASSERT(i == EMX_NSERIALIZE); 482 483 ifmedia_init(&sc->media, IFM_IMASK, emx_media_change, emx_media_status); 484 callout_init_mp(&sc->timer); 485 486 sc->dev = sc->osdep.dev = dev; 487 488 /* 489 * Determine hardware and mac type 490 */ 491 sc->hw.vendor_id = pci_get_vendor(dev); 492 sc->hw.device_id = pci_get_device(dev); 493 sc->hw.revision_id = pci_get_revid(dev); 494 sc->hw.subsystem_vendor_id = pci_get_subvendor(dev); 495 sc->hw.subsystem_device_id = pci_get_subdevice(dev); 496 497 if (e1000_set_mac_type(&sc->hw)) 498 return ENXIO; 499 500 /* Enable bus mastering */ 501 pci_enable_busmaster(dev); 502 503 /* 504 * Allocate IO memory 505 */ 506 sc->memory_rid = EMX_BAR_MEM; 507 sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 508 &sc->memory_rid, RF_ACTIVE); 509 if (sc->memory == NULL) { 510 device_printf(dev, "Unable to allocate bus resource: memory\n"); 511 error = ENXIO; 512 goto fail; 513 } 514 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory); 515 sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory); 516 517 /* XXX This is quite goofy, it is not actually used */ 518 sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle; 519 520 /* 521 * Don't enable MSI-X on 82574, see: 522 * 82574 specification update errata #15 523 * 524 * Don't enable MSI on 82571/82572, see: 525 * 82571/82572 specification update errata #63 526 */ 527 msi_enable = emx_msi_enable; 528 if (msi_enable && 529 (sc->hw.mac.type == e1000_82571 || 530 sc->hw.mac.type == e1000_82572)) 531 msi_enable = 0; 532 533 /* 534 * Allocate interrupt 535 */ 536 sc->intr_type = pci_alloc_1intr(dev, msi_enable, 537 &sc->intr_rid, &intr_flags); 538 539 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 540 int unshared; 541 542 unshared = device_getenv_int(dev, "irq.unshared", 0); 543 if (!unshared) { 544 sc->flags |= EMX_FLAG_SHARED_INTR; 545 if (bootverbose) 546 device_printf(dev, "IRQ shared\n"); 547 } else { 548 intr_flags &= ~RF_SHAREABLE; 549 if (bootverbose) 550 device_printf(dev, "IRQ unshared\n"); 551 } 552 } 553 554 sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid, 555 intr_flags); 556 if (sc->intr_res == NULL) { 557 device_printf(dev, "Unable to allocate bus resource: " 558 "interrupt\n"); 559 error = ENXIO; 560 goto fail; 561 } 562 563 /* Save PCI command register for Shared Code */ 564 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 565 sc->hw.back = &sc->osdep; 566 567 /* 568 * For I217/I218, we need to map the flash memory and this 569 * must happen after the MAC is identified. 570 */ 571 if (sc->hw.mac.type == e1000_pch_lpt) { 572 sc->flash_rid = EMX_BAR_FLASH; 573 574 sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 575 &sc->flash_rid, RF_ACTIVE); 576 if (sc->flash == NULL) { 577 device_printf(dev, "Mapping of Flash failed\n"); 578 error = ENXIO; 579 goto fail; 580 } 581 sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash); 582 sc->osdep.flash_bus_space_handle = 583 rman_get_bushandle(sc->flash); 584 585 /* 586 * This is used in the shared code 587 * XXX this goof is actually not used. 588 */ 589 sc->hw.flash_address = (uint8_t *)sc->flash; 590 } 591 592 /* Do Shared Code initialization */ 593 if (e1000_setup_init_funcs(&sc->hw, TRUE)) { 594 device_printf(dev, "Setup of Shared code failed\n"); 595 error = ENXIO; 596 goto fail; 597 } 598 e1000_get_bus_info(&sc->hw); 599 600 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 601 sc->hw.phy.autoneg_wait_to_complete = FALSE; 602 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT; 603 604 /* 605 * Interrupt throttle rate 606 */ 607 throttle = device_getenv_int(dev, "int_throttle_ceil", 608 emx_int_throttle_ceil); 609 if (throttle == 0) { 610 sc->int_throttle_ceil = 0; 611 } else { 612 if (throttle < 0) 613 throttle = EMX_DEFAULT_ITR; 614 615 /* Recalculate the tunable value to get the exact frequency. */ 616 throttle = 1000000000 / 256 / throttle; 617 618 /* Upper 16bits of ITR is reserved and should be zero */ 619 if (throttle & 0xffff0000) 620 throttle = 1000000000 / 256 / EMX_DEFAULT_ITR; 621 622 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 623 } 624 625 e1000_init_script_state_82541(&sc->hw, TRUE); 626 e1000_set_tbi_compatibility_82543(&sc->hw, TRUE); 627 628 /* Copper options */ 629 if (sc->hw.phy.media_type == e1000_media_type_copper) { 630 sc->hw.phy.mdix = EMX_AUTO_ALL_MODES; 631 sc->hw.phy.disable_polarity_correction = FALSE; 632 sc->hw.phy.ms_type = EMX_MASTER_SLAVE; 633 } 634 635 /* Set the frame limits assuming standard ethernet sized frames. */ 636 sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 637 638 /* This controls when hardware reports transmit completion status. */ 639 sc->hw.mac.report_tx_early = 1; 640 641 /* Calculate # of RX rings */ 642 sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr); 643 sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING); 644 645 /* 646 * Calculate # of TX rings 647 * 648 * XXX 649 * I217/I218 claims to have 2 TX queues 650 * 651 * NOTE: 652 * Don't enable multiple TX queues on 82574; it always gives 653 * watchdog timeout on TX queue0, when multiple TCP streams are 654 * received. It was originally suspected that the hardware TX 655 * checksum offloading caused this watchdog timeout, since only 656 * TCP ACKs are sent during TCP receiving tests. However, even 657 * if the hardware TX checksum offloading is disable, TX queue0 658 * still will give watchdog. 659 */ 660 tx_ring_max = 1; 661 if (sc->hw.mac.type == e1000_82571 || 662 sc->hw.mac.type == e1000_82572 || 663 sc->hw.mac.type == e1000_80003es2lan || 664 sc->hw.mac.type == e1000_pch_lpt || 665 sc->hw.mac.type == e1000_82574) 666 tx_ring_max = EMX_NTX_RING; 667 sc->tx_ring_cnt = device_getenv_int(dev, "txr", emx_txr); 668 sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, tx_ring_max); 669 670 /* Allocate RX/TX rings' busdma(9) stuffs */ 671 error = emx_dma_alloc(sc); 672 if (error) 673 goto fail; 674 675 /* Allocate multicast array memory. */ 676 sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX, 677 M_DEVBUF, M_WAITOK); 678 679 /* Indicate SOL/IDER usage */ 680 if (e1000_check_reset_block(&sc->hw)) { 681 device_printf(dev, 682 "PHY reset is blocked due to SOL/IDER session.\n"); 683 } 684 685 /* Disable EEE on I217/I218 */ 686 sc->hw.dev_spec.ich8lan.eee_disable = 1; 687 688 /* 689 * Start from a known state, this is important in reading the 690 * nvm and mac from that. 691 */ 692 e1000_reset_hw(&sc->hw); 693 694 /* Make sure we have a good EEPROM before we read from it */ 695 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 696 /* 697 * Some PCI-E parts fail the first check due to 698 * the link being in sleep state, call it again, 699 * if it fails a second time its a real issue. 700 */ 701 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 702 device_printf(dev, 703 "The EEPROM Checksum Is Not Valid\n"); 704 error = EIO; 705 goto fail; 706 } 707 } 708 709 /* Copy the permanent MAC address out of the EEPROM */ 710 if (e1000_read_mac_addr(&sc->hw) < 0) { 711 device_printf(dev, "EEPROM read error while reading MAC" 712 " address\n"); 713 error = EIO; 714 goto fail; 715 } 716 if (!emx_is_valid_eaddr(sc->hw.mac.addr)) { 717 device_printf(dev, "Invalid MAC address\n"); 718 error = EIO; 719 goto fail; 720 } 721 722 /* Disable ULP support */ 723 e1000_disable_ulp_lpt_lp(&sc->hw, TRUE); 724 725 /* Determine if we have to control management hardware */ 726 if (e1000_enable_mng_pass_thru(&sc->hw)) 727 sc->flags |= EMX_FLAG_HAS_MGMT; 728 729 /* 730 * Setup Wake-on-Lan 731 */ 732 apme_mask = EMX_EEPROM_APME; 733 eeprom_data = 0; 734 switch (sc->hw.mac.type) { 735 case e1000_82573: 736 sc->flags |= EMX_FLAG_HAS_AMT; 737 /* FALL THROUGH */ 738 739 case e1000_82571: 740 case e1000_82572: 741 case e1000_80003es2lan: 742 if (sc->hw.bus.func == 1) { 743 e1000_read_nvm(&sc->hw, 744 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 745 } else { 746 e1000_read_nvm(&sc->hw, 747 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 748 } 749 break; 750 751 default: 752 e1000_read_nvm(&sc->hw, 753 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 754 break; 755 } 756 if (eeprom_data & apme_mask) 757 sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC; 758 759 /* 760 * We have the eeprom settings, now apply the special cases 761 * where the eeprom may be wrong or the board won't support 762 * wake on lan on a particular port 763 */ 764 device_id = pci_get_device(dev); 765 switch (device_id) { 766 case E1000_DEV_ID_82571EB_FIBER: 767 /* 768 * Wake events only supported on port A for dual fiber 769 * regardless of eeprom setting 770 */ 771 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & 772 E1000_STATUS_FUNC_1) 773 sc->wol = 0; 774 break; 775 776 case E1000_DEV_ID_82571EB_QUAD_COPPER: 777 case E1000_DEV_ID_82571EB_QUAD_FIBER: 778 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: 779 /* if quad port sc, disable WoL on all but port A */ 780 if (emx_global_quad_port_a != 0) 781 sc->wol = 0; 782 /* Reset for multiple quad port adapters */ 783 if (++emx_global_quad_port_a == 4) 784 emx_global_quad_port_a = 0; 785 break; 786 } 787 788 /* XXX disable wol */ 789 sc->wol = 0; 790 791 #ifdef IFPOLL_ENABLE 792 /* 793 * NPOLLING RX CPU offset 794 */ 795 if (sc->rx_ring_cnt == ncpus2) { 796 offset = 0; 797 } else { 798 offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2; 799 offset = device_getenv_int(dev, "npoll.rxoff", offset_def); 800 if (offset >= ncpus2 || 801 offset % sc->rx_ring_cnt != 0) { 802 device_printf(dev, "invalid npoll.rxoff %d, use %d\n", 803 offset, offset_def); 804 offset = offset_def; 805 } 806 } 807 sc->rx_npoll_off = offset; 808 809 /* 810 * NPOLLING TX CPU offset 811 */ 812 if (sc->tx_ring_cnt == ncpus2) { 813 offset = 0; 814 } else { 815 offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2; 816 offset = device_getenv_int(dev, "npoll.txoff", offset_def); 817 if (offset >= ncpus2 || 818 offset % sc->tx_ring_cnt != 0) { 819 device_printf(dev, "invalid npoll.txoff %d, use %d\n", 820 offset, offset_def); 821 offset = offset_def; 822 } 823 } 824 sc->tx_npoll_off = offset; 825 #endif 826 sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE); 827 828 /* Setup OS specific network interface */ 829 emx_setup_ifp(sc); 830 831 /* Add sysctl tree, must after em_setup_ifp() */ 832 emx_add_sysctl(sc); 833 834 /* Reset the hardware */ 835 error = emx_reset(sc); 836 if (error) { 837 /* 838 * Some 82573 parts fail the first reset, call it again, 839 * if it fails a second time its a real issue. 840 */ 841 error = emx_reset(sc); 842 if (error) { 843 device_printf(dev, "Unable to reset the hardware\n"); 844 ether_ifdetach(&sc->arpcom.ac_if); 845 goto fail; 846 } 847 } 848 849 /* Initialize statistics */ 850 emx_update_stats(sc); 851 852 sc->hw.mac.get_link_status = 1; 853 emx_update_link_status(sc); 854 855 /* Non-AMT based hardware can now take control from firmware */ 856 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) == 857 EMX_FLAG_HAS_MGMT) 858 emx_get_hw_control(sc); 859 860 /* 861 * Missing Interrupt Following ICR read: 862 * 863 * 82571/82572 specification update errata #76 864 * 82573 specification update errata #31 865 * 82574 specification update errata #12 866 */ 867 intr_func = emx_intr; 868 if ((sc->flags & EMX_FLAG_SHARED_INTR) && 869 (sc->hw.mac.type == e1000_82571 || 870 sc->hw.mac.type == e1000_82572 || 871 sc->hw.mac.type == e1000_82573 || 872 sc->hw.mac.type == e1000_82574)) 873 intr_func = emx_intr_mask; 874 875 error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc, 876 &sc->intr_tag, &sc->main_serialize); 877 if (error) { 878 device_printf(dev, "Failed to register interrupt handler"); 879 ether_ifdetach(&sc->arpcom.ac_if); 880 goto fail; 881 } 882 return (0); 883 fail: 884 emx_detach(dev); 885 return (error); 886 } 887 888 static int 889 emx_detach(device_t dev) 890 { 891 struct emx_softc *sc = device_get_softc(dev); 892 893 if (device_is_attached(dev)) { 894 struct ifnet *ifp = &sc->arpcom.ac_if; 895 896 ifnet_serialize_all(ifp); 897 898 emx_stop(sc); 899 900 e1000_phy_hw_reset(&sc->hw); 901 902 emx_rel_mgmt(sc); 903 emx_rel_hw_control(sc); 904 905 if (sc->wol) { 906 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 907 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 908 emx_enable_wol(dev); 909 } 910 911 bus_teardown_intr(dev, sc->intr_res, sc->intr_tag); 912 913 ifnet_deserialize_all(ifp); 914 915 ether_ifdetach(ifp); 916 } else if (sc->memory != NULL) { 917 emx_rel_hw_control(sc); 918 } 919 920 ifmedia_removeall(&sc->media); 921 bus_generic_detach(dev); 922 923 if (sc->intr_res != NULL) { 924 bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, 925 sc->intr_res); 926 } 927 928 if (sc->intr_type == PCI_INTR_TYPE_MSI) 929 pci_release_msi(dev); 930 931 if (sc->memory != NULL) { 932 bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid, 933 sc->memory); 934 } 935 936 if (sc->flash != NULL) { 937 bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid, 938 sc->flash); 939 } 940 941 emx_dma_free(sc); 942 943 if (sc->mta != NULL) 944 kfree(sc->mta, M_DEVBUF); 945 946 return (0); 947 } 948 949 static int 950 emx_shutdown(device_t dev) 951 { 952 return emx_suspend(dev); 953 } 954 955 static int 956 emx_suspend(device_t dev) 957 { 958 struct emx_softc *sc = device_get_softc(dev); 959 struct ifnet *ifp = &sc->arpcom.ac_if; 960 961 ifnet_serialize_all(ifp); 962 963 emx_stop(sc); 964 965 emx_rel_mgmt(sc); 966 emx_rel_hw_control(sc); 967 968 if (sc->wol) { 969 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 970 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 971 emx_enable_wol(dev); 972 } 973 974 ifnet_deserialize_all(ifp); 975 976 return bus_generic_suspend(dev); 977 } 978 979 static int 980 emx_resume(device_t dev) 981 { 982 struct emx_softc *sc = device_get_softc(dev); 983 struct ifnet *ifp = &sc->arpcom.ac_if; 984 int i; 985 986 ifnet_serialize_all(ifp); 987 988 emx_init(sc); 989 emx_get_mgmt(sc); 990 for (i = 0; i < sc->tx_ring_inuse; ++i) 991 ifsq_devstart_sched(sc->tx_data[i].ifsq); 992 993 ifnet_deserialize_all(ifp); 994 995 return bus_generic_resume(dev); 996 } 997 998 static void 999 emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1000 { 1001 struct emx_softc *sc = ifp->if_softc; 1002 struct emx_txdata *tdata = ifsq_get_priv(ifsq); 1003 struct mbuf *m_head; 1004 int idx = -1, nsegs = 0; 1005 1006 KKASSERT(tdata->ifsq == ifsq); 1007 ASSERT_SERIALIZED(&tdata->tx_serialize); 1008 1009 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 1010 return; 1011 1012 if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) { 1013 ifsq_purge(ifsq); 1014 return; 1015 } 1016 1017 while (!ifsq_is_empty(ifsq)) { 1018 /* Now do we at least have a minimal? */ 1019 if (EMX_IS_OACTIVE(tdata)) { 1020 emx_tx_collect(tdata); 1021 if (EMX_IS_OACTIVE(tdata)) { 1022 ifsq_set_oactive(ifsq); 1023 break; 1024 } 1025 } 1026 1027 logif(pkt_txqueue); 1028 m_head = ifsq_dequeue(ifsq); 1029 if (m_head == NULL) 1030 break; 1031 1032 if (emx_encap(tdata, &m_head, &nsegs, &idx)) { 1033 IFNET_STAT_INC(ifp, oerrors, 1); 1034 emx_tx_collect(tdata); 1035 continue; 1036 } 1037 1038 /* 1039 * TX interrupt are aggressively aggregated, so increasing 1040 * opackets at TX interrupt time will make the opackets 1041 * statistics vastly inaccurate; we do the opackets increment 1042 * now. 1043 */ 1044 IFNET_STAT_INC(ifp, opackets, 1); 1045 1046 if (nsegs >= tdata->tx_wreg_nsegs) { 1047 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx); 1048 nsegs = 0; 1049 idx = -1; 1050 } 1051 1052 /* Send a copy of the frame to the BPF listener */ 1053 ETHER_BPF_MTAP(ifp, m_head); 1054 1055 /* Set timeout in case hardware has problems transmitting. */ 1056 tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT; 1057 } 1058 if (idx >= 0) 1059 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx); 1060 } 1061 1062 static int 1063 emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1064 { 1065 struct emx_softc *sc = ifp->if_softc; 1066 struct ifreq *ifr = (struct ifreq *)data; 1067 uint16_t eeprom_data = 0; 1068 int max_frame_size, mask, reinit; 1069 int error = 0; 1070 1071 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1072 1073 switch (command) { 1074 case SIOCSIFMTU: 1075 switch (sc->hw.mac.type) { 1076 case e1000_82573: 1077 /* 1078 * 82573 only supports jumbo frames 1079 * if ASPM is disabled. 1080 */ 1081 e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1, 1082 &eeprom_data); 1083 if (eeprom_data & NVM_WORD1A_ASPM_MASK) { 1084 max_frame_size = ETHER_MAX_LEN; 1085 break; 1086 } 1087 /* FALL THROUGH */ 1088 1089 /* Limit Jumbo Frame size */ 1090 case e1000_82571: 1091 case e1000_82572: 1092 case e1000_82574: 1093 case e1000_pch_lpt: 1094 case e1000_80003es2lan: 1095 max_frame_size = 9234; 1096 break; 1097 1098 default: 1099 max_frame_size = MAX_JUMBO_FRAME_SIZE; 1100 break; 1101 } 1102 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 1103 ETHER_CRC_LEN) { 1104 error = EINVAL; 1105 break; 1106 } 1107 1108 ifp->if_mtu = ifr->ifr_mtu; 1109 sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + 1110 ETHER_CRC_LEN; 1111 1112 if (ifp->if_flags & IFF_RUNNING) 1113 emx_init(sc); 1114 break; 1115 1116 case SIOCSIFFLAGS: 1117 if (ifp->if_flags & IFF_UP) { 1118 if ((ifp->if_flags & IFF_RUNNING)) { 1119 if ((ifp->if_flags ^ sc->if_flags) & 1120 (IFF_PROMISC | IFF_ALLMULTI)) { 1121 emx_disable_promisc(sc); 1122 emx_set_promisc(sc); 1123 } 1124 } else { 1125 emx_init(sc); 1126 } 1127 } else if (ifp->if_flags & IFF_RUNNING) { 1128 emx_stop(sc); 1129 } 1130 sc->if_flags = ifp->if_flags; 1131 break; 1132 1133 case SIOCADDMULTI: 1134 case SIOCDELMULTI: 1135 if (ifp->if_flags & IFF_RUNNING) { 1136 emx_disable_intr(sc); 1137 emx_set_multi(sc); 1138 #ifdef IFPOLL_ENABLE 1139 if (!(ifp->if_flags & IFF_NPOLLING)) 1140 #endif 1141 emx_enable_intr(sc); 1142 } 1143 break; 1144 1145 case SIOCSIFMEDIA: 1146 /* Check SOL/IDER usage */ 1147 if (e1000_check_reset_block(&sc->hw)) { 1148 device_printf(sc->dev, "Media change is" 1149 " blocked due to SOL/IDER session.\n"); 1150 break; 1151 } 1152 /* FALL THROUGH */ 1153 1154 case SIOCGIFMEDIA: 1155 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 1156 break; 1157 1158 case SIOCSIFCAP: 1159 reinit = 0; 1160 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1161 if (mask & IFCAP_RXCSUM) { 1162 ifp->if_capenable ^= IFCAP_RXCSUM; 1163 reinit = 1; 1164 } 1165 if (mask & IFCAP_VLAN_HWTAGGING) { 1166 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1167 reinit = 1; 1168 } 1169 if (mask & IFCAP_TXCSUM) { 1170 ifp->if_capenable ^= IFCAP_TXCSUM; 1171 if (ifp->if_capenable & IFCAP_TXCSUM) 1172 ifp->if_hwassist |= EMX_CSUM_FEATURES; 1173 else 1174 ifp->if_hwassist &= ~EMX_CSUM_FEATURES; 1175 } 1176 if (mask & IFCAP_TSO) { 1177 ifp->if_capenable ^= IFCAP_TSO; 1178 if (ifp->if_capenable & IFCAP_TSO) 1179 ifp->if_hwassist |= CSUM_TSO; 1180 else 1181 ifp->if_hwassist &= ~CSUM_TSO; 1182 } 1183 if (mask & IFCAP_RSS) 1184 ifp->if_capenable ^= IFCAP_RSS; 1185 if (reinit && (ifp->if_flags & IFF_RUNNING)) 1186 emx_init(sc); 1187 break; 1188 1189 default: 1190 error = ether_ioctl(ifp, command, data); 1191 break; 1192 } 1193 return (error); 1194 } 1195 1196 static void 1197 emx_watchdog(struct ifaltq_subque *ifsq) 1198 { 1199 struct emx_txdata *tdata = ifsq_get_priv(ifsq); 1200 struct ifnet *ifp = ifsq_get_ifp(ifsq); 1201 struct emx_softc *sc = ifp->if_softc; 1202 int i; 1203 1204 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1205 1206 /* 1207 * The timer is set to 5 every time start queues a packet. 1208 * Then txeof keeps resetting it as long as it cleans at 1209 * least one descriptor. 1210 * Finally, anytime all descriptors are clean the timer is 1211 * set to 0. 1212 */ 1213 1214 if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) == 1215 E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) { 1216 /* 1217 * If we reach here, all TX jobs are completed and 1218 * the TX engine should have been idled for some time. 1219 * We don't need to call ifsq_devstart_sched() here. 1220 */ 1221 ifsq_clr_oactive(ifsq); 1222 tdata->tx_watchdog.wd_timer = 0; 1223 return; 1224 } 1225 1226 /* 1227 * If we are in this routine because of pause frames, then 1228 * don't reset the hardware. 1229 */ 1230 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) { 1231 tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT; 1232 return; 1233 } 1234 1235 if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx); 1236 1237 IFNET_STAT_INC(ifp, oerrors, 1); 1238 1239 emx_init(sc); 1240 for (i = 0; i < sc->tx_ring_inuse; ++i) 1241 ifsq_devstart_sched(sc->tx_data[i].ifsq); 1242 } 1243 1244 static void 1245 emx_init(void *xsc) 1246 { 1247 struct emx_softc *sc = xsc; 1248 struct ifnet *ifp = &sc->arpcom.ac_if; 1249 device_t dev = sc->dev; 1250 boolean_t polling; 1251 int i; 1252 1253 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1254 1255 emx_stop(sc); 1256 1257 /* Get the latest mac address, User can use a LAA */ 1258 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN); 1259 1260 /* Put the address into the Receive Address Array */ 1261 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1262 1263 /* 1264 * With the 82571 sc, RAR[0] may be overwritten 1265 * when the other port is reset, we make a duplicate 1266 * in RAR[14] for that eventuality, this assures 1267 * the interface continues to function. 1268 */ 1269 if (sc->hw.mac.type == e1000_82571) { 1270 e1000_set_laa_state_82571(&sc->hw, TRUE); 1271 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 1272 E1000_RAR_ENTRIES - 1); 1273 } 1274 1275 /* Initialize the hardware */ 1276 if (emx_reset(sc)) { 1277 device_printf(dev, "Unable to reset the hardware\n"); 1278 /* XXX emx_stop()? */ 1279 return; 1280 } 1281 emx_update_link_status(sc); 1282 1283 /* Setup VLAN support, basic and offload if available */ 1284 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1285 1286 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1287 uint32_t ctrl; 1288 1289 ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL); 1290 ctrl |= E1000_CTRL_VME; 1291 E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl); 1292 } 1293 1294 /* Configure for OS presence */ 1295 emx_get_mgmt(sc); 1296 1297 polling = FALSE; 1298 #ifdef IFPOLL_ENABLE 1299 if (ifp->if_flags & IFF_NPOLLING) 1300 polling = TRUE; 1301 #endif 1302 sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling); 1303 ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1); 1304 1305 /* Prepare transmit descriptors and buffers */ 1306 for (i = 0; i < sc->tx_ring_inuse; ++i) 1307 emx_init_tx_ring(&sc->tx_data[i]); 1308 emx_init_tx_unit(sc); 1309 1310 /* Setup Multicast table */ 1311 emx_set_multi(sc); 1312 1313 /* Prepare receive descriptors and buffers */ 1314 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1315 if (emx_init_rx_ring(&sc->rx_data[i])) { 1316 device_printf(dev, 1317 "Could not setup receive structures\n"); 1318 emx_stop(sc); 1319 return; 1320 } 1321 } 1322 emx_init_rx_unit(sc); 1323 1324 /* Don't lose promiscuous settings */ 1325 emx_set_promisc(sc); 1326 1327 ifp->if_flags |= IFF_RUNNING; 1328 for (i = 0; i < sc->tx_ring_inuse; ++i) { 1329 ifsq_clr_oactive(sc->tx_data[i].ifsq); 1330 ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog); 1331 } 1332 1333 callout_reset(&sc->timer, hz, emx_timer, sc); 1334 e1000_clear_hw_cntrs_base_generic(&sc->hw); 1335 1336 /* MSI/X configuration for 82574 */ 1337 if (sc->hw.mac.type == e1000_82574) { 1338 int tmp; 1339 1340 tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 1341 tmp |= E1000_CTRL_EXT_PBA_CLR; 1342 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp); 1343 /* 1344 * XXX MSIX 1345 * Set the IVAR - interrupt vector routing. 1346 * Each nibble represents a vector, high bit 1347 * is enable, other 3 bits are the MSIX table 1348 * entry, we map RXQ0 to 0, TXQ0 to 1, and 1349 * Link (other) to 2, hence the magic number. 1350 */ 1351 E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908); 1352 } 1353 1354 /* 1355 * Only enable interrupts if we are not polling, make sure 1356 * they are off otherwise. 1357 */ 1358 if (polling) 1359 emx_disable_intr(sc); 1360 else 1361 emx_enable_intr(sc); 1362 1363 /* AMT based hardware can now take control from firmware */ 1364 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) == 1365 (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) 1366 emx_get_hw_control(sc); 1367 } 1368 1369 static void 1370 emx_intr(void *xsc) 1371 { 1372 emx_intr_body(xsc, TRUE); 1373 } 1374 1375 static void 1376 emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted) 1377 { 1378 struct ifnet *ifp = &sc->arpcom.ac_if; 1379 uint32_t reg_icr; 1380 1381 logif(intr_beg); 1382 ASSERT_SERIALIZED(&sc->main_serialize); 1383 1384 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 1385 1386 if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) { 1387 logif(intr_end); 1388 return; 1389 } 1390 1391 /* 1392 * XXX: some laptops trigger several spurious interrupts 1393 * on emx(4) when in the resume cycle. The ICR register 1394 * reports all-ones value in this case. Processing such 1395 * interrupts would lead to a freeze. I don't know why. 1396 */ 1397 if (reg_icr == 0xffffffff) { 1398 logif(intr_end); 1399 return; 1400 } 1401 1402 if (ifp->if_flags & IFF_RUNNING) { 1403 if (reg_icr & 1404 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 1405 int i; 1406 1407 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1408 lwkt_serialize_enter( 1409 &sc->rx_data[i].rx_serialize); 1410 emx_rxeof(&sc->rx_data[i], -1); 1411 lwkt_serialize_exit( 1412 &sc->rx_data[i].rx_serialize); 1413 } 1414 } 1415 if (reg_icr & E1000_ICR_TXDW) { 1416 struct emx_txdata *tdata = &sc->tx_data[0]; 1417 1418 lwkt_serialize_enter(&tdata->tx_serialize); 1419 emx_txeof(tdata); 1420 if (!ifsq_is_empty(tdata->ifsq)) 1421 ifsq_devstart(tdata->ifsq); 1422 lwkt_serialize_exit(&tdata->tx_serialize); 1423 } 1424 } 1425 1426 /* Link status change */ 1427 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1428 emx_serialize_skipmain(sc); 1429 1430 callout_stop(&sc->timer); 1431 sc->hw.mac.get_link_status = 1; 1432 emx_update_link_status(sc); 1433 1434 /* Deal with TX cruft when link lost */ 1435 emx_tx_purge(sc); 1436 1437 callout_reset(&sc->timer, hz, emx_timer, sc); 1438 1439 emx_deserialize_skipmain(sc); 1440 } 1441 1442 if (reg_icr & E1000_ICR_RXO) 1443 sc->rx_overruns++; 1444 1445 logif(intr_end); 1446 } 1447 1448 static void 1449 emx_intr_mask(void *xsc) 1450 { 1451 struct emx_softc *sc = xsc; 1452 1453 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 1454 /* 1455 * NOTE: 1456 * ICR.INT_ASSERTED bit will never be set if IMS is 0, 1457 * so don't check it. 1458 */ 1459 emx_intr_body(sc, FALSE); 1460 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 1461 } 1462 1463 static void 1464 emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1465 { 1466 struct emx_softc *sc = ifp->if_softc; 1467 1468 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1469 1470 emx_update_link_status(sc); 1471 1472 ifmr->ifm_status = IFM_AVALID; 1473 ifmr->ifm_active = IFM_ETHER; 1474 1475 if (!sc->link_active) 1476 return; 1477 1478 ifmr->ifm_status |= IFM_ACTIVE; 1479 1480 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1481 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 1482 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 1483 } else { 1484 switch (sc->link_speed) { 1485 case 10: 1486 ifmr->ifm_active |= IFM_10_T; 1487 break; 1488 case 100: 1489 ifmr->ifm_active |= IFM_100_TX; 1490 break; 1491 1492 case 1000: 1493 ifmr->ifm_active |= IFM_1000_T; 1494 break; 1495 } 1496 if (sc->link_duplex == FULL_DUPLEX) 1497 ifmr->ifm_active |= IFM_FDX; 1498 else 1499 ifmr->ifm_active |= IFM_HDX; 1500 } 1501 } 1502 1503 static int 1504 emx_media_change(struct ifnet *ifp) 1505 { 1506 struct emx_softc *sc = ifp->if_softc; 1507 struct ifmedia *ifm = &sc->media; 1508 1509 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1510 1511 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1512 return (EINVAL); 1513 1514 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1515 case IFM_AUTO: 1516 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1517 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT; 1518 break; 1519 1520 case IFM_1000_LX: 1521 case IFM_1000_SX: 1522 case IFM_1000_T: 1523 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1524 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1525 break; 1526 1527 case IFM_100_TX: 1528 sc->hw.mac.autoneg = FALSE; 1529 sc->hw.phy.autoneg_advertised = 0; 1530 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1531 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1532 else 1533 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1534 break; 1535 1536 case IFM_10_T: 1537 sc->hw.mac.autoneg = FALSE; 1538 sc->hw.phy.autoneg_advertised = 0; 1539 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1540 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1541 else 1542 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1543 break; 1544 1545 default: 1546 if_printf(ifp, "Unsupported media type\n"); 1547 break; 1548 } 1549 1550 emx_init(sc); 1551 1552 return (0); 1553 } 1554 1555 static int 1556 emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp, 1557 int *segs_used, int *idx) 1558 { 1559 bus_dma_segment_t segs[EMX_MAX_SCATTER]; 1560 bus_dmamap_t map; 1561 struct emx_txbuf *tx_buffer, *tx_buffer_mapped; 1562 struct e1000_tx_desc *ctxd = NULL; 1563 struct mbuf *m_head = *m_headp; 1564 uint32_t txd_upper, txd_lower, cmd = 0; 1565 int maxsegs, nsegs, i, j, first, last = 0, error; 1566 1567 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1568 error = emx_tso_pullup(tdata, m_headp); 1569 if (error) 1570 return error; 1571 m_head = *m_headp; 1572 } 1573 1574 txd_upper = txd_lower = 0; 1575 1576 /* 1577 * Capture the first descriptor index, this descriptor 1578 * will have the index of the EOP which is the only one 1579 * that now gets a DONE bit writeback. 1580 */ 1581 first = tdata->next_avail_tx_desc; 1582 tx_buffer = &tdata->tx_buf[first]; 1583 tx_buffer_mapped = tx_buffer; 1584 map = tx_buffer->map; 1585 1586 maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED; 1587 KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc")); 1588 if (maxsegs > EMX_MAX_SCATTER) 1589 maxsegs = EMX_MAX_SCATTER; 1590 1591 error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp, 1592 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1593 if (error) { 1594 m_freem(*m_headp); 1595 *m_headp = NULL; 1596 return error; 1597 } 1598 bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE); 1599 1600 m_head = *m_headp; 1601 tdata->tx_nsegs += nsegs; 1602 *segs_used += nsegs; 1603 1604 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1605 /* TSO will consume one TX desc */ 1606 i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower); 1607 tdata->tx_nsegs += i; 1608 *segs_used += i; 1609 } else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) { 1610 /* TX csum offloading will consume one TX desc */ 1611 i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower); 1612 tdata->tx_nsegs += i; 1613 *segs_used += i; 1614 } 1615 1616 /* Handle VLAN tag */ 1617 if (m_head->m_flags & M_VLANTAG) { 1618 /* Set the vlan id. */ 1619 txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16); 1620 /* Tell hardware to add tag */ 1621 txd_lower |= htole32(E1000_TXD_CMD_VLE); 1622 } 1623 1624 i = tdata->next_avail_tx_desc; 1625 1626 /* Set up our transmit descriptors */ 1627 for (j = 0; j < nsegs; j++) { 1628 tx_buffer = &tdata->tx_buf[i]; 1629 ctxd = &tdata->tx_desc_base[i]; 1630 1631 ctxd->buffer_addr = htole64(segs[j].ds_addr); 1632 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS | 1633 txd_lower | segs[j].ds_len); 1634 ctxd->upper.data = htole32(txd_upper); 1635 1636 last = i; 1637 if (++i == tdata->num_tx_desc) 1638 i = 0; 1639 } 1640 1641 tdata->next_avail_tx_desc = i; 1642 1643 KKASSERT(tdata->num_tx_desc_avail > nsegs); 1644 tdata->num_tx_desc_avail -= nsegs; 1645 1646 tx_buffer->m_head = m_head; 1647 tx_buffer_mapped->map = tx_buffer->map; 1648 tx_buffer->map = map; 1649 1650 if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) { 1651 tdata->tx_nsegs = 0; 1652 1653 /* 1654 * Report Status (RS) is turned on 1655 * every tx_intr_nsegs descriptors. 1656 */ 1657 cmd = E1000_TXD_CMD_RS; 1658 1659 /* 1660 * Keep track of the descriptor, which will 1661 * be written back by hardware. 1662 */ 1663 tdata->tx_dd[tdata->tx_dd_tail] = last; 1664 EMX_INC_TXDD_IDX(tdata->tx_dd_tail); 1665 KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head); 1666 } 1667 1668 /* 1669 * Last Descriptor of Packet needs End Of Packet (EOP) 1670 */ 1671 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd); 1672 1673 /* 1674 * Defer TDT updating, until enough descriptors are setup 1675 */ 1676 *idx = i; 1677 1678 #ifdef EMX_TSS_DEBUG 1679 tdata->tx_pkts++; 1680 #endif 1681 1682 return (0); 1683 } 1684 1685 static void 1686 emx_set_promisc(struct emx_softc *sc) 1687 { 1688 struct ifnet *ifp = &sc->arpcom.ac_if; 1689 uint32_t reg_rctl; 1690 1691 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1692 1693 if (ifp->if_flags & IFF_PROMISC) { 1694 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1695 /* Turn this on if you want to see bad packets */ 1696 if (emx_debug_sbp) 1697 reg_rctl |= E1000_RCTL_SBP; 1698 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1699 } else if (ifp->if_flags & IFF_ALLMULTI) { 1700 reg_rctl |= E1000_RCTL_MPE; 1701 reg_rctl &= ~E1000_RCTL_UPE; 1702 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1703 } 1704 } 1705 1706 static void 1707 emx_disable_promisc(struct emx_softc *sc) 1708 { 1709 uint32_t reg_rctl; 1710 1711 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1712 1713 reg_rctl &= ~E1000_RCTL_UPE; 1714 reg_rctl &= ~E1000_RCTL_MPE; 1715 reg_rctl &= ~E1000_RCTL_SBP; 1716 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1717 } 1718 1719 static void 1720 emx_set_multi(struct emx_softc *sc) 1721 { 1722 struct ifnet *ifp = &sc->arpcom.ac_if; 1723 struct ifmultiaddr *ifma; 1724 uint32_t reg_rctl = 0; 1725 uint8_t *mta; 1726 int mcnt = 0; 1727 1728 mta = sc->mta; 1729 bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX); 1730 1731 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1732 if (ifma->ifma_addr->sa_family != AF_LINK) 1733 continue; 1734 1735 if (mcnt == EMX_MCAST_ADDR_MAX) 1736 break; 1737 1738 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1739 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1740 mcnt++; 1741 } 1742 1743 if (mcnt >= EMX_MCAST_ADDR_MAX) { 1744 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1745 reg_rctl |= E1000_RCTL_MPE; 1746 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1747 } else { 1748 e1000_update_mc_addr_list(&sc->hw, mta, mcnt); 1749 } 1750 } 1751 1752 /* 1753 * This routine checks for link status and updates statistics. 1754 */ 1755 static void 1756 emx_timer(void *xsc) 1757 { 1758 struct emx_softc *sc = xsc; 1759 struct ifnet *ifp = &sc->arpcom.ac_if; 1760 1761 lwkt_serialize_enter(&sc->main_serialize); 1762 1763 emx_update_link_status(sc); 1764 emx_update_stats(sc); 1765 1766 /* Reset LAA into RAR[0] on 82571 */ 1767 if (e1000_get_laa_state_82571(&sc->hw) == TRUE) 1768 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1769 1770 if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 1771 emx_print_hw_stats(sc); 1772 1773 emx_smartspeed(sc); 1774 1775 callout_reset(&sc->timer, hz, emx_timer, sc); 1776 1777 lwkt_serialize_exit(&sc->main_serialize); 1778 } 1779 1780 static void 1781 emx_update_link_status(struct emx_softc *sc) 1782 { 1783 struct e1000_hw *hw = &sc->hw; 1784 struct ifnet *ifp = &sc->arpcom.ac_if; 1785 device_t dev = sc->dev; 1786 uint32_t link_check = 0; 1787 1788 /* Get the cached link value or read phy for real */ 1789 switch (hw->phy.media_type) { 1790 case e1000_media_type_copper: 1791 if (hw->mac.get_link_status) { 1792 /* Do the work to read phy */ 1793 e1000_check_for_link(hw); 1794 link_check = !hw->mac.get_link_status; 1795 if (link_check) /* ESB2 fix */ 1796 e1000_cfg_on_link_up(hw); 1797 } else { 1798 link_check = TRUE; 1799 } 1800 break; 1801 1802 case e1000_media_type_fiber: 1803 e1000_check_for_link(hw); 1804 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 1805 break; 1806 1807 case e1000_media_type_internal_serdes: 1808 e1000_check_for_link(hw); 1809 link_check = sc->hw.mac.serdes_has_link; 1810 break; 1811 1812 case e1000_media_type_unknown: 1813 default: 1814 break; 1815 } 1816 1817 /* Now check for a transition */ 1818 if (link_check && sc->link_active == 0) { 1819 e1000_get_speed_and_duplex(hw, &sc->link_speed, 1820 &sc->link_duplex); 1821 1822 /* 1823 * Check if we should enable/disable SPEED_MODE bit on 1824 * 82571EB/82572EI 1825 */ 1826 if (sc->link_speed != SPEED_1000 && 1827 (hw->mac.type == e1000_82571 || 1828 hw->mac.type == e1000_82572)) { 1829 int tarc0; 1830 1831 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 1832 tarc0 &= ~EMX_TARC_SPEED_MODE; 1833 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 1834 } 1835 if (bootverbose) { 1836 device_printf(dev, "Link is up %d Mbps %s\n", 1837 sc->link_speed, 1838 ((sc->link_duplex == FULL_DUPLEX) ? 1839 "Full Duplex" : "Half Duplex")); 1840 } 1841 sc->link_active = 1; 1842 sc->smartspeed = 0; 1843 ifp->if_baudrate = sc->link_speed * 1000000; 1844 ifp->if_link_state = LINK_STATE_UP; 1845 if_link_state_change(ifp); 1846 } else if (!link_check && sc->link_active == 1) { 1847 ifp->if_baudrate = sc->link_speed = 0; 1848 sc->link_duplex = 0; 1849 if (bootverbose) 1850 device_printf(dev, "Link is Down\n"); 1851 sc->link_active = 0; 1852 ifp->if_link_state = LINK_STATE_DOWN; 1853 if_link_state_change(ifp); 1854 } 1855 } 1856 1857 static void 1858 emx_stop(struct emx_softc *sc) 1859 { 1860 struct ifnet *ifp = &sc->arpcom.ac_if; 1861 int i; 1862 1863 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1864 1865 emx_disable_intr(sc); 1866 1867 callout_stop(&sc->timer); 1868 1869 ifp->if_flags &= ~IFF_RUNNING; 1870 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1871 struct emx_txdata *tdata = &sc->tx_data[i]; 1872 1873 ifsq_clr_oactive(tdata->ifsq); 1874 ifsq_watchdog_stop(&tdata->tx_watchdog); 1875 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED; 1876 } 1877 1878 /* 1879 * Disable multiple receive queues. 1880 * 1881 * NOTE: 1882 * We should disable multiple receive queues before 1883 * resetting the hardware. 1884 */ 1885 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0); 1886 1887 e1000_reset_hw(&sc->hw); 1888 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1889 1890 for (i = 0; i < sc->tx_ring_cnt; ++i) 1891 emx_free_tx_ring(&sc->tx_data[i]); 1892 for (i = 0; i < sc->rx_ring_cnt; ++i) 1893 emx_free_rx_ring(&sc->rx_data[i]); 1894 } 1895 1896 static int 1897 emx_reset(struct emx_softc *sc) 1898 { 1899 device_t dev = sc->dev; 1900 uint16_t rx_buffer_size; 1901 uint32_t pba; 1902 1903 /* Set up smart power down as default off on newer adapters. */ 1904 if (!emx_smart_pwr_down && 1905 (sc->hw.mac.type == e1000_82571 || 1906 sc->hw.mac.type == e1000_82572)) { 1907 uint16_t phy_tmp = 0; 1908 1909 /* Speed up time to link by disabling smart power down. */ 1910 e1000_read_phy_reg(&sc->hw, 1911 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 1912 phy_tmp &= ~IGP02E1000_PM_SPD; 1913 e1000_write_phy_reg(&sc->hw, 1914 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 1915 } 1916 1917 /* 1918 * Packet Buffer Allocation (PBA) 1919 * Writing PBA sets the receive portion of the buffer 1920 * the remainder is used for the transmit buffer. 1921 */ 1922 switch (sc->hw.mac.type) { 1923 /* Total Packet Buffer on these is 48K */ 1924 case e1000_82571: 1925 case e1000_82572: 1926 case e1000_80003es2lan: 1927 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 1928 break; 1929 1930 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 1931 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 1932 break; 1933 1934 case e1000_82574: 1935 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 1936 break; 1937 1938 case e1000_pch_lpt: 1939 pba = E1000_PBA_26K; 1940 break; 1941 1942 default: 1943 /* Devices before 82547 had a Packet Buffer of 64K. */ 1944 if (sc->hw.mac.max_frame_size > 8192) 1945 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1946 else 1947 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1948 } 1949 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba); 1950 1951 /* 1952 * These parameters control the automatic generation (Tx) and 1953 * response (Rx) to Ethernet PAUSE frames. 1954 * - High water mark should allow for at least two frames to be 1955 * received after sending an XOFF. 1956 * - Low water mark works best when it is very near the high water mark. 1957 * This allows the receiver to restart by sending XON when it has 1958 * drained a bit. Here we use an arbitary value of 1500 which will 1959 * restart after one full frame is pulled from the buffer. There 1960 * could be several smaller frames in the buffer and if so they will 1961 * not trigger the XON until their total number reduces the buffer 1962 * by 1500. 1963 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 1964 */ 1965 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10; 1966 1967 sc->hw.fc.high_water = rx_buffer_size - 1968 roundup2(sc->hw.mac.max_frame_size, 1024); 1969 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500; 1970 1971 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME; 1972 sc->hw.fc.send_xon = TRUE; 1973 sc->hw.fc.requested_mode = e1000_fc_full; 1974 1975 /* 1976 * Device specific overrides/settings 1977 */ 1978 if (sc->hw.mac.type == e1000_pch_lpt) { 1979 sc->hw.fc.high_water = 0x5C20; 1980 sc->hw.fc.low_water = 0x5048; 1981 sc->hw.fc.pause_time = 0x0650; 1982 sc->hw.fc.refresh_time = 0x0400; 1983 /* Jumbos need adjusted PBA */ 1984 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) 1985 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12); 1986 else 1987 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26); 1988 } else if (sc->hw.mac.type == e1000_80003es2lan) { 1989 sc->hw.fc.pause_time = 0xFFFF; 1990 } 1991 1992 /* Issue a global reset */ 1993 e1000_reset_hw(&sc->hw); 1994 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1995 emx_disable_aspm(sc); 1996 1997 if (e1000_init_hw(&sc->hw) < 0) { 1998 device_printf(dev, "Hardware Initialization Failed\n"); 1999 return (EIO); 2000 } 2001 2002 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 2003 e1000_get_phy_info(&sc->hw); 2004 e1000_check_for_link(&sc->hw); 2005 2006 return (0); 2007 } 2008 2009 static void 2010 emx_setup_ifp(struct emx_softc *sc) 2011 { 2012 struct ifnet *ifp = &sc->arpcom.ac_if; 2013 int i; 2014 2015 if_initname(ifp, device_get_name(sc->dev), 2016 device_get_unit(sc->dev)); 2017 ifp->if_softc = sc; 2018 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2019 ifp->if_init = emx_init; 2020 ifp->if_ioctl = emx_ioctl; 2021 ifp->if_start = emx_start; 2022 #ifdef IFPOLL_ENABLE 2023 ifp->if_npoll = emx_npoll; 2024 #endif 2025 ifp->if_serialize = emx_serialize; 2026 ifp->if_deserialize = emx_deserialize; 2027 ifp->if_tryserialize = emx_tryserialize; 2028 #ifdef INVARIANTS 2029 ifp->if_serialize_assert = emx_serialize_assert; 2030 #endif 2031 2032 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc; 2033 2034 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1); 2035 ifq_set_ready(&ifp->if_snd); 2036 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 2037 2038 ifp->if_mapsubq = ifq_mapsubq_mask; 2039 ifq_set_subq_mask(&ifp->if_snd, 0); 2040 2041 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2042 2043 ifp->if_capabilities = IFCAP_HWCSUM | 2044 IFCAP_VLAN_HWTAGGING | 2045 IFCAP_VLAN_MTU | 2046 IFCAP_TSO; 2047 if (sc->rx_ring_cnt > 1) 2048 ifp->if_capabilities |= IFCAP_RSS; 2049 ifp->if_capenable = ifp->if_capabilities; 2050 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2051 2052 /* 2053 * Tell the upper layer(s) we support long frames. 2054 */ 2055 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2056 2057 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2058 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2059 struct emx_txdata *tdata = &sc->tx_data[i]; 2060 2061 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2062 ifsq_set_priv(ifsq, tdata); 2063 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2064 tdata->ifsq = ifsq; 2065 2066 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2067 } 2068 2069 /* 2070 * Specify the media types supported by this sc and register 2071 * callbacks to update media and link information 2072 */ 2073 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2074 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2075 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2076 0, NULL); 2077 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 2078 } else { 2079 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2080 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2081 0, NULL); 2082 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2083 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2084 0, NULL); 2085 if (sc->hw.phy.type != e1000_phy_ife) { 2086 ifmedia_add(&sc->media, 2087 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2088 ifmedia_add(&sc->media, 2089 IFM_ETHER | IFM_1000_T, 0, NULL); 2090 } 2091 } 2092 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2093 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 2094 } 2095 2096 /* 2097 * Workaround for SmartSpeed on 82541 and 82547 controllers 2098 */ 2099 static void 2100 emx_smartspeed(struct emx_softc *sc) 2101 { 2102 uint16_t phy_tmp; 2103 2104 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2105 sc->hw.mac.autoneg == 0 || 2106 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2107 return; 2108 2109 if (sc->smartspeed == 0) { 2110 /* 2111 * If Master/Slave config fault is asserted twice, 2112 * we assume back-to-back 2113 */ 2114 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2115 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2116 return; 2117 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2118 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2119 e1000_read_phy_reg(&sc->hw, 2120 PHY_1000T_CTRL, &phy_tmp); 2121 if (phy_tmp & CR_1000T_MS_ENABLE) { 2122 phy_tmp &= ~CR_1000T_MS_ENABLE; 2123 e1000_write_phy_reg(&sc->hw, 2124 PHY_1000T_CTRL, phy_tmp); 2125 sc->smartspeed++; 2126 if (sc->hw.mac.autoneg && 2127 !e1000_phy_setup_autoneg(&sc->hw) && 2128 !e1000_read_phy_reg(&sc->hw, 2129 PHY_CONTROL, &phy_tmp)) { 2130 phy_tmp |= MII_CR_AUTO_NEG_EN | 2131 MII_CR_RESTART_AUTO_NEG; 2132 e1000_write_phy_reg(&sc->hw, 2133 PHY_CONTROL, phy_tmp); 2134 } 2135 } 2136 } 2137 return; 2138 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2139 /* If still no link, perhaps using 2/3 pair cable */ 2140 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2141 phy_tmp |= CR_1000T_MS_ENABLE; 2142 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2143 if (sc->hw.mac.autoneg && 2144 !e1000_phy_setup_autoneg(&sc->hw) && 2145 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2146 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2147 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2148 } 2149 } 2150 2151 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2152 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2153 sc->smartspeed = 0; 2154 } 2155 2156 static int 2157 emx_create_tx_ring(struct emx_txdata *tdata) 2158 { 2159 device_t dev = tdata->sc->dev; 2160 struct emx_txbuf *tx_buffer; 2161 int error, i, tsize, ntxd; 2162 2163 /* 2164 * Validate number of transmit descriptors. It must not exceed 2165 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2166 */ 2167 ntxd = device_getenv_int(dev, "txd", emx_txd); 2168 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2169 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2170 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2171 EMX_DEFAULT_TXD, ntxd); 2172 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2173 } else { 2174 tdata->num_tx_desc = ntxd; 2175 } 2176 2177 /* 2178 * Allocate Transmit Descriptor ring 2179 */ 2180 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2181 EMX_DBA_ALIGN); 2182 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2183 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2184 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2185 &tdata->tx_desc_paddr); 2186 if (tdata->tx_desc_base == NULL) { 2187 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2188 return ENOMEM; 2189 } 2190 2191 tsize = __VM_CACHELINE_ALIGN( 2192 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2193 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO); 2194 2195 /* 2196 * Create DMA tags for tx buffers 2197 */ 2198 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2199 1, 0, /* alignment, bounds */ 2200 BUS_SPACE_MAXADDR, /* lowaddr */ 2201 BUS_SPACE_MAXADDR, /* highaddr */ 2202 NULL, NULL, /* filter, filterarg */ 2203 EMX_TSO_SIZE, /* maxsize */ 2204 EMX_MAX_SCATTER, /* nsegments */ 2205 EMX_MAX_SEGSIZE, /* maxsegsize */ 2206 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2207 BUS_DMA_ONEBPAGE, /* flags */ 2208 &tdata->txtag); 2209 if (error) { 2210 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2211 kfree(tdata->tx_buf, M_DEVBUF); 2212 tdata->tx_buf = NULL; 2213 return error; 2214 } 2215 2216 /* 2217 * Create DMA maps for tx buffers 2218 */ 2219 for (i = 0; i < tdata->num_tx_desc; i++) { 2220 tx_buffer = &tdata->tx_buf[i]; 2221 2222 error = bus_dmamap_create(tdata->txtag, 2223 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2224 &tx_buffer->map); 2225 if (error) { 2226 device_printf(dev, "Unable to create TX DMA map\n"); 2227 emx_destroy_tx_ring(tdata, i); 2228 return error; 2229 } 2230 } 2231 2232 /* 2233 * Setup TX parameters 2234 */ 2235 tdata->spare_tx_desc = EMX_TX_SPARE; 2236 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2237 2238 /* 2239 * Keep following relationship between spare_tx_desc, oact_tx_desc 2240 * and tx_intr_nsegs: 2241 * (spare_tx_desc + EMX_TX_RESERVED) <= 2242 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2243 */ 2244 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2245 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2246 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2247 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2248 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2249 2250 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2251 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2252 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2253 2254 /* 2255 * Pullup extra 4bytes into the first data segment for TSO, see: 2256 * 82571/82572 specification update errata #7 2257 * 2258 * Same applies to I217 (and maybe I218). 2259 * 2260 * NOTE: 2261 * 4bytes instead of 2bytes, which are mentioned in the errata, 2262 * are pulled; mainly to keep rest of the data properly aligned. 2263 */ 2264 if (tdata->sc->hw.mac.type == e1000_82571 || 2265 tdata->sc->hw.mac.type == e1000_82572 || 2266 tdata->sc->hw.mac.type == e1000_pch_lpt) 2267 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2268 2269 return (0); 2270 } 2271 2272 static void 2273 emx_init_tx_ring(struct emx_txdata *tdata) 2274 { 2275 /* Clear the old ring contents */ 2276 bzero(tdata->tx_desc_base, 2277 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2278 2279 /* Reset state */ 2280 tdata->next_avail_tx_desc = 0; 2281 tdata->next_tx_to_clean = 0; 2282 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2283 2284 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2285 if (tdata->sc->tx_ring_inuse > 1) { 2286 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2287 if (bootverbose) { 2288 if_printf(&tdata->sc->arpcom.ac_if, 2289 "TX %d force ctx setup\n", tdata->idx); 2290 } 2291 } 2292 } 2293 2294 static void 2295 emx_init_tx_unit(struct emx_softc *sc) 2296 { 2297 uint32_t tctl, tarc, tipg = 0, txdctl; 2298 int i; 2299 2300 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2301 struct emx_txdata *tdata = &sc->tx_data[i]; 2302 uint64_t bus_addr; 2303 2304 /* Setup the Base and Length of the Tx Descriptor Ring */ 2305 bus_addr = tdata->tx_desc_paddr; 2306 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2307 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2308 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2309 (uint32_t)(bus_addr >> 32)); 2310 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2311 (uint32_t)bus_addr); 2312 /* Setup the HW Tx Head and Tail descriptor pointers */ 2313 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2314 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2315 } 2316 2317 /* Set the default values for the Tx Inter Packet Gap timer */ 2318 switch (sc->hw.mac.type) { 2319 case e1000_80003es2lan: 2320 tipg = DEFAULT_82543_TIPG_IPGR1; 2321 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2322 E1000_TIPG_IPGR2_SHIFT; 2323 break; 2324 2325 default: 2326 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2327 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2328 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2329 else 2330 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2331 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2332 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2333 break; 2334 } 2335 2336 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2337 2338 /* NOTE: 0 is not allowed for TIDV */ 2339 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2340 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2341 2342 /* 2343 * Errata workaround (obtained from Linux). This is necessary 2344 * to make multiple TX queues work on 82574. 2345 * XXX can't find it in any published errata though. 2346 */ 2347 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0)); 2348 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl); 2349 2350 if (sc->hw.mac.type == e1000_82571 || 2351 sc->hw.mac.type == e1000_82572) { 2352 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2353 tarc |= EMX_TARC_SPEED_MODE; 2354 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2355 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2356 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2357 tarc |= 1; 2358 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2359 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2360 tarc |= 1; 2361 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2362 } 2363 2364 /* Program the Transmit Control Register */ 2365 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2366 tctl &= ~E1000_TCTL_CT; 2367 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2368 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2369 tctl |= E1000_TCTL_MULR; 2370 2371 /* This write will effectively turn on the transmit unit. */ 2372 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2373 2374 if (sc->hw.mac.type == e1000_82571 || 2375 sc->hw.mac.type == e1000_82572 || 2376 sc->hw.mac.type == e1000_80003es2lan) { 2377 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2378 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2379 tarc &= ~(1 << 28); 2380 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2381 } 2382 2383 if (sc->tx_ring_inuse > 1) { 2384 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2385 tarc &= ~EMX_TARC_COUNT_MASK; 2386 tarc |= 1; 2387 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2388 2389 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2390 tarc &= ~EMX_TARC_COUNT_MASK; 2391 tarc |= 1; 2392 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2393 } 2394 } 2395 2396 static void 2397 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc) 2398 { 2399 struct emx_txbuf *tx_buffer; 2400 int i; 2401 2402 /* Free Transmit Descriptor ring */ 2403 if (tdata->tx_desc_base) { 2404 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap); 2405 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base, 2406 tdata->tx_desc_dmap); 2407 bus_dma_tag_destroy(tdata->tx_desc_dtag); 2408 2409 tdata->tx_desc_base = NULL; 2410 } 2411 2412 if (tdata->tx_buf == NULL) 2413 return; 2414 2415 for (i = 0; i < ndesc; i++) { 2416 tx_buffer = &tdata->tx_buf[i]; 2417 2418 KKASSERT(tx_buffer->m_head == NULL); 2419 bus_dmamap_destroy(tdata->txtag, tx_buffer->map); 2420 } 2421 bus_dma_tag_destroy(tdata->txtag); 2422 2423 kfree(tdata->tx_buf, M_DEVBUF); 2424 tdata->tx_buf = NULL; 2425 } 2426 2427 /* 2428 * The offload context needs to be set when we transfer the first 2429 * packet of a particular protocol (TCP/UDP). This routine has been 2430 * enhanced to deal with inserted VLAN headers. 2431 * 2432 * If the new packet's ether header length, ip header length and 2433 * csum offloading type are same as the previous packet, we should 2434 * avoid allocating a new csum context descriptor; mainly to take 2435 * advantage of the pipeline effect of the TX data read request. 2436 * 2437 * This function returns number of TX descrptors allocated for 2438 * csum context. 2439 */ 2440 static int 2441 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp, 2442 uint32_t *txd_upper, uint32_t *txd_lower) 2443 { 2444 struct e1000_context_desc *TXD; 2445 int curr_txd, ehdrlen, csum_flags; 2446 uint32_t cmd, hdr_len, ip_hlen; 2447 2448 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2449 ip_hlen = mp->m_pkthdr.csum_iphlen; 2450 ehdrlen = mp->m_pkthdr.csum_lhlen; 2451 2452 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 2453 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen && 2454 tdata->csum_flags == csum_flags) { 2455 /* 2456 * Same csum offload context as the previous packets; 2457 * just return. 2458 */ 2459 *txd_upper = tdata->csum_txd_upper; 2460 *txd_lower = tdata->csum_txd_lower; 2461 return 0; 2462 } 2463 2464 /* 2465 * Setup a new csum offload context. 2466 */ 2467 2468 curr_txd = tdata->next_avail_tx_desc; 2469 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 2470 2471 cmd = 0; 2472 2473 /* Setup of IP header checksum. */ 2474 if (csum_flags & CSUM_IP) { 2475 /* 2476 * Start offset for header checksum calculation. 2477 * End offset for header checksum calculation. 2478 * Offset of place to put the checksum. 2479 */ 2480 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2481 TXD->lower_setup.ip_fields.ipcse = 2482 htole16(ehdrlen + ip_hlen - 1); 2483 TXD->lower_setup.ip_fields.ipcso = 2484 ehdrlen + offsetof(struct ip, ip_sum); 2485 cmd |= E1000_TXD_CMD_IP; 2486 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2487 } 2488 hdr_len = ehdrlen + ip_hlen; 2489 2490 if (csum_flags & CSUM_TCP) { 2491 /* 2492 * Start offset for payload checksum calculation. 2493 * End offset for payload checksum calculation. 2494 * Offset of place to put the checksum. 2495 */ 2496 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2497 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2498 TXD->upper_setup.tcp_fields.tucso = 2499 hdr_len + offsetof(struct tcphdr, th_sum); 2500 cmd |= E1000_TXD_CMD_TCP; 2501 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2502 } else if (csum_flags & CSUM_UDP) { 2503 /* 2504 * Start offset for header checksum calculation. 2505 * End offset for header checksum calculation. 2506 * Offset of place to put the checksum. 2507 */ 2508 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2509 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2510 TXD->upper_setup.tcp_fields.tucso = 2511 hdr_len + offsetof(struct udphdr, uh_sum); 2512 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2513 } 2514 2515 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2516 E1000_TXD_DTYP_D; /* Data descr */ 2517 2518 /* Save the information for this csum offloading context */ 2519 tdata->csum_lhlen = ehdrlen; 2520 tdata->csum_iphlen = ip_hlen; 2521 tdata->csum_flags = csum_flags; 2522 tdata->csum_txd_upper = *txd_upper; 2523 tdata->csum_txd_lower = *txd_lower; 2524 2525 TXD->tcp_seg_setup.data = htole32(0); 2526 TXD->cmd_and_length = 2527 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2528 2529 if (++curr_txd == tdata->num_tx_desc) 2530 curr_txd = 0; 2531 2532 KKASSERT(tdata->num_tx_desc_avail > 0); 2533 tdata->num_tx_desc_avail--; 2534 2535 tdata->next_avail_tx_desc = curr_txd; 2536 return 1; 2537 } 2538 2539 static void 2540 emx_txeof(struct emx_txdata *tdata) 2541 { 2542 struct emx_txbuf *tx_buffer; 2543 int first, num_avail; 2544 2545 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2546 return; 2547 2548 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2549 return; 2550 2551 num_avail = tdata->num_tx_desc_avail; 2552 first = tdata->next_tx_to_clean; 2553 2554 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2555 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2556 struct e1000_tx_desc *tx_desc; 2557 2558 tx_desc = &tdata->tx_desc_base[dd_idx]; 2559 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2560 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2561 2562 if (++dd_idx == tdata->num_tx_desc) 2563 dd_idx = 0; 2564 2565 while (first != dd_idx) { 2566 logif(pkt_txclean); 2567 2568 num_avail++; 2569 2570 tx_buffer = &tdata->tx_buf[first]; 2571 if (tx_buffer->m_head) { 2572 bus_dmamap_unload(tdata->txtag, 2573 tx_buffer->map); 2574 m_freem(tx_buffer->m_head); 2575 tx_buffer->m_head = NULL; 2576 } 2577 2578 if (++first == tdata->num_tx_desc) 2579 first = 0; 2580 } 2581 } else { 2582 break; 2583 } 2584 } 2585 tdata->next_tx_to_clean = first; 2586 tdata->num_tx_desc_avail = num_avail; 2587 2588 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2589 tdata->tx_dd_head = 0; 2590 tdata->tx_dd_tail = 0; 2591 } 2592 2593 if (!EMX_IS_OACTIVE(tdata)) { 2594 ifsq_clr_oactive(tdata->ifsq); 2595 2596 /* All clean, turn off the timer */ 2597 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2598 tdata->tx_watchdog.wd_timer = 0; 2599 } 2600 } 2601 2602 static void 2603 emx_tx_collect(struct emx_txdata *tdata) 2604 { 2605 struct emx_txbuf *tx_buffer; 2606 int tdh, first, num_avail, dd_idx = -1; 2607 2608 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2609 return; 2610 2611 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx)); 2612 if (tdh == tdata->next_tx_to_clean) 2613 return; 2614 2615 if (tdata->tx_dd_head != tdata->tx_dd_tail) 2616 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2617 2618 num_avail = tdata->num_tx_desc_avail; 2619 first = tdata->next_tx_to_clean; 2620 2621 while (first != tdh) { 2622 logif(pkt_txclean); 2623 2624 num_avail++; 2625 2626 tx_buffer = &tdata->tx_buf[first]; 2627 if (tx_buffer->m_head) { 2628 bus_dmamap_unload(tdata->txtag, 2629 tx_buffer->map); 2630 m_freem(tx_buffer->m_head); 2631 tx_buffer->m_head = NULL; 2632 } 2633 2634 if (first == dd_idx) { 2635 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2636 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2637 tdata->tx_dd_head = 0; 2638 tdata->tx_dd_tail = 0; 2639 dd_idx = -1; 2640 } else { 2641 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2642 } 2643 } 2644 2645 if (++first == tdata->num_tx_desc) 2646 first = 0; 2647 } 2648 tdata->next_tx_to_clean = first; 2649 tdata->num_tx_desc_avail = num_avail; 2650 2651 if (!EMX_IS_OACTIVE(tdata)) { 2652 ifsq_clr_oactive(tdata->ifsq); 2653 2654 /* All clean, turn off the timer */ 2655 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2656 tdata->tx_watchdog.wd_timer = 0; 2657 } 2658 } 2659 2660 /* 2661 * When Link is lost sometimes there is work still in the TX ring 2662 * which will result in a watchdog, rather than allow that do an 2663 * attempted cleanup and then reinit here. Note that this has been 2664 * seens mostly with fiber adapters. 2665 */ 2666 static void 2667 emx_tx_purge(struct emx_softc *sc) 2668 { 2669 int i; 2670 2671 if (sc->link_active) 2672 return; 2673 2674 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2675 struct emx_txdata *tdata = &sc->tx_data[i]; 2676 2677 if (tdata->tx_watchdog.wd_timer) { 2678 emx_tx_collect(tdata); 2679 if (tdata->tx_watchdog.wd_timer) { 2680 if_printf(&sc->arpcom.ac_if, 2681 "Link lost, TX pending, reinit\n"); 2682 emx_init(sc); 2683 return; 2684 } 2685 } 2686 } 2687 } 2688 2689 static int 2690 emx_newbuf(struct emx_rxdata *rdata, int i, int init) 2691 { 2692 struct mbuf *m; 2693 bus_dma_segment_t seg; 2694 bus_dmamap_t map; 2695 struct emx_rxbuf *rx_buffer; 2696 int error, nseg; 2697 2698 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2699 if (m == NULL) { 2700 if (init) { 2701 if_printf(&rdata->sc->arpcom.ac_if, 2702 "Unable to allocate RX mbuf\n"); 2703 } 2704 return (ENOBUFS); 2705 } 2706 m->m_len = m->m_pkthdr.len = MCLBYTES; 2707 2708 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 2709 m_adj(m, ETHER_ALIGN); 2710 2711 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2712 rdata->rx_sparemap, m, 2713 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2714 if (error) { 2715 m_freem(m); 2716 if (init) { 2717 if_printf(&rdata->sc->arpcom.ac_if, 2718 "Unable to load RX mbuf\n"); 2719 } 2720 return (error); 2721 } 2722 2723 rx_buffer = &rdata->rx_buf[i]; 2724 if (rx_buffer->m_head != NULL) 2725 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2726 2727 map = rx_buffer->map; 2728 rx_buffer->map = rdata->rx_sparemap; 2729 rdata->rx_sparemap = map; 2730 2731 rx_buffer->m_head = m; 2732 rx_buffer->paddr = seg.ds_addr; 2733 2734 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2735 return (0); 2736 } 2737 2738 static int 2739 emx_create_rx_ring(struct emx_rxdata *rdata) 2740 { 2741 device_t dev = rdata->sc->dev; 2742 struct emx_rxbuf *rx_buffer; 2743 int i, error, rsize, nrxd; 2744 2745 /* 2746 * Validate number of receive descriptors. It must not exceed 2747 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2748 */ 2749 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2750 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2751 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2752 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2753 EMX_DEFAULT_RXD, nrxd); 2754 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2755 } else { 2756 rdata->num_rx_desc = nrxd; 2757 } 2758 2759 /* 2760 * Allocate Receive Descriptor ring 2761 */ 2762 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2763 EMX_DBA_ALIGN); 2764 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2765 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2766 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2767 &rdata->rx_desc_paddr); 2768 if (rdata->rx_desc == NULL) { 2769 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2770 return ENOMEM; 2771 } 2772 2773 rsize = __VM_CACHELINE_ALIGN( 2774 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2775 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2776 2777 /* 2778 * Create DMA tag for rx buffers 2779 */ 2780 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2781 1, 0, /* alignment, bounds */ 2782 BUS_SPACE_MAXADDR, /* lowaddr */ 2783 BUS_SPACE_MAXADDR, /* highaddr */ 2784 NULL, NULL, /* filter, filterarg */ 2785 MCLBYTES, /* maxsize */ 2786 1, /* nsegments */ 2787 MCLBYTES, /* maxsegsize */ 2788 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2789 &rdata->rxtag); 2790 if (error) { 2791 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2792 kfree(rdata->rx_buf, M_DEVBUF); 2793 rdata->rx_buf = NULL; 2794 return error; 2795 } 2796 2797 /* 2798 * Create spare DMA map for rx buffers 2799 */ 2800 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2801 &rdata->rx_sparemap); 2802 if (error) { 2803 device_printf(dev, "Unable to create spare RX DMA map\n"); 2804 bus_dma_tag_destroy(rdata->rxtag); 2805 kfree(rdata->rx_buf, M_DEVBUF); 2806 rdata->rx_buf = NULL; 2807 return error; 2808 } 2809 2810 /* 2811 * Create DMA maps for rx buffers 2812 */ 2813 for (i = 0; i < rdata->num_rx_desc; i++) { 2814 rx_buffer = &rdata->rx_buf[i]; 2815 2816 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2817 &rx_buffer->map); 2818 if (error) { 2819 device_printf(dev, "Unable to create RX DMA map\n"); 2820 emx_destroy_rx_ring(rdata, i); 2821 return error; 2822 } 2823 } 2824 return (0); 2825 } 2826 2827 static void 2828 emx_free_rx_ring(struct emx_rxdata *rdata) 2829 { 2830 int i; 2831 2832 for (i = 0; i < rdata->num_rx_desc; i++) { 2833 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 2834 2835 if (rx_buffer->m_head != NULL) { 2836 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2837 m_freem(rx_buffer->m_head); 2838 rx_buffer->m_head = NULL; 2839 } 2840 } 2841 2842 if (rdata->fmp != NULL) 2843 m_freem(rdata->fmp); 2844 rdata->fmp = NULL; 2845 rdata->lmp = NULL; 2846 } 2847 2848 static void 2849 emx_free_tx_ring(struct emx_txdata *tdata) 2850 { 2851 int i; 2852 2853 for (i = 0; i < tdata->num_tx_desc; i++) { 2854 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i]; 2855 2856 if (tx_buffer->m_head != NULL) { 2857 bus_dmamap_unload(tdata->txtag, tx_buffer->map); 2858 m_freem(tx_buffer->m_head); 2859 tx_buffer->m_head = NULL; 2860 } 2861 } 2862 2863 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX; 2864 2865 tdata->csum_flags = 0; 2866 tdata->csum_lhlen = 0; 2867 tdata->csum_iphlen = 0; 2868 tdata->csum_thlen = 0; 2869 tdata->csum_mss = 0; 2870 tdata->csum_pktlen = 0; 2871 2872 tdata->tx_dd_head = 0; 2873 tdata->tx_dd_tail = 0; 2874 tdata->tx_nsegs = 0; 2875 } 2876 2877 static int 2878 emx_init_rx_ring(struct emx_rxdata *rdata) 2879 { 2880 int i, error; 2881 2882 /* Reset descriptor ring */ 2883 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 2884 2885 /* Allocate new ones. */ 2886 for (i = 0; i < rdata->num_rx_desc; i++) { 2887 error = emx_newbuf(rdata, i, 1); 2888 if (error) 2889 return (error); 2890 } 2891 2892 /* Setup our descriptor pointers */ 2893 rdata->next_rx_desc_to_check = 0; 2894 2895 return (0); 2896 } 2897 2898 static void 2899 emx_init_rx_unit(struct emx_softc *sc) 2900 { 2901 struct ifnet *ifp = &sc->arpcom.ac_if; 2902 uint64_t bus_addr; 2903 uint32_t rctl, itr, rfctl; 2904 int i; 2905 2906 /* 2907 * Make sure receives are disabled while setting 2908 * up the descriptor ring 2909 */ 2910 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 2911 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2912 2913 /* 2914 * Set the interrupt throttling rate. Value is calculated 2915 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 2916 */ 2917 if (sc->int_throttle_ceil) 2918 itr = 1000000000 / 256 / sc->int_throttle_ceil; 2919 else 2920 itr = 0; 2921 emx_set_itr(sc, itr); 2922 2923 /* Use extended RX descriptor */ 2924 rfctl = E1000_RFCTL_EXTEN; 2925 2926 /* Disable accelerated ackknowledge */ 2927 if (sc->hw.mac.type == e1000_82574) 2928 rfctl |= E1000_RFCTL_ACK_DIS; 2929 2930 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 2931 2932 /* 2933 * Receive Checksum Offload for TCP and UDP 2934 * 2935 * Checksum offloading is also enabled if multiple receive 2936 * queue is to be supported, since we need it to figure out 2937 * packet type. 2938 */ 2939 if ((ifp->if_capenable & IFCAP_RXCSUM) || 2940 sc->rx_ring_cnt > 1) { 2941 uint32_t rxcsum; 2942 2943 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2944 2945 /* 2946 * NOTE: 2947 * PCSD must be enabled to enable multiple 2948 * receive queues. 2949 */ 2950 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2951 E1000_RXCSUM_PCSD; 2952 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2953 } 2954 2955 /* 2956 * Configure multiple receive queue (RSS) 2957 */ 2958 if (sc->rx_ring_cnt > 1) { 2959 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE]; 2960 uint32_t reta; 2961 2962 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING, 2963 ("invalid number of RX ring (%d)", sc->rx_ring_cnt)); 2964 2965 /* 2966 * NOTE: 2967 * When we reach here, RSS has already been disabled 2968 * in emx_stop(), so we could safely configure RSS key 2969 * and redirect table. 2970 */ 2971 2972 /* 2973 * Configure RSS key 2974 */ 2975 toeplitz_get_key(key, sizeof(key)); 2976 for (i = 0; i < EMX_NRSSRK; ++i) { 2977 uint32_t rssrk; 2978 2979 rssrk = EMX_RSSRK_VAL(key, i); 2980 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2981 2982 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk); 2983 } 2984 2985 /* 2986 * Configure RSS redirect table in following fashion: 2987 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 2988 */ 2989 reta = 0; 2990 for (i = 0; i < EMX_RETA_SIZE; ++i) { 2991 uint32_t q; 2992 2993 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT; 2994 reta |= q << (8 * i); 2995 } 2996 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2997 2998 for (i = 0; i < EMX_NRETA; ++i) 2999 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta); 3000 3001 /* 3002 * Enable multiple receive queues. 3003 * Enable IPv4 RSS standard hash functions. 3004 * Disable RSS interrupt. 3005 */ 3006 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 3007 E1000_MRQC_ENABLE_RSS_2Q | 3008 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3009 E1000_MRQC_RSS_FIELD_IPV4); 3010 } 3011 3012 /* 3013 * XXX TEMPORARY WORKAROUND: on some systems with 82573 3014 * long latencies are observed, like Lenovo X60. This 3015 * change eliminates the problem, but since having positive 3016 * values in RDTR is a known source of problems on other 3017 * platforms another solution is being sought. 3018 */ 3019 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 3020 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 3021 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 3022 } 3023 3024 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3025 struct emx_rxdata *rdata = &sc->rx_data[i]; 3026 3027 /* 3028 * Setup the Base and Length of the Rx Descriptor Ring 3029 */ 3030 bus_addr = rdata->rx_desc_paddr; 3031 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 3032 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 3033 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 3034 (uint32_t)(bus_addr >> 32)); 3035 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 3036 (uint32_t)bus_addr); 3037 3038 /* 3039 * Setup the HW Rx Head and Tail Descriptor Pointers 3040 */ 3041 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 3042 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 3043 sc->rx_data[i].num_rx_desc - 1); 3044 } 3045 3046 if (sc->hw.mac.type >= e1000_pch2lan) { 3047 if (ifp->if_mtu > ETHERMTU) 3048 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE); 3049 else 3050 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE); 3051 } 3052 3053 /* Setup the Receive Control Register */ 3054 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3055 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3056 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 3057 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3058 3059 /* Make sure VLAN Filters are off */ 3060 rctl &= ~E1000_RCTL_VFE; 3061 3062 /* Don't store bad paket */ 3063 rctl &= ~E1000_RCTL_SBP; 3064 3065 /* MCLBYTES */ 3066 rctl |= E1000_RCTL_SZ_2048; 3067 3068 if (ifp->if_mtu > ETHERMTU) 3069 rctl |= E1000_RCTL_LPE; 3070 else 3071 rctl &= ~E1000_RCTL_LPE; 3072 3073 /* Enable Receives */ 3074 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 3075 } 3076 3077 static void 3078 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc) 3079 { 3080 struct emx_rxbuf *rx_buffer; 3081 int i; 3082 3083 /* Free Receive Descriptor ring */ 3084 if (rdata->rx_desc) { 3085 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 3086 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 3087 rdata->rx_desc_dmap); 3088 bus_dma_tag_destroy(rdata->rx_desc_dtag); 3089 3090 rdata->rx_desc = NULL; 3091 } 3092 3093 if (rdata->rx_buf == NULL) 3094 return; 3095 3096 for (i = 0; i < ndesc; i++) { 3097 rx_buffer = &rdata->rx_buf[i]; 3098 3099 KKASSERT(rx_buffer->m_head == NULL); 3100 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 3101 } 3102 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 3103 bus_dma_tag_destroy(rdata->rxtag); 3104 3105 kfree(rdata->rx_buf, M_DEVBUF); 3106 rdata->rx_buf = NULL; 3107 } 3108 3109 static void 3110 emx_rxeof(struct emx_rxdata *rdata, int count) 3111 { 3112 struct ifnet *ifp = &rdata->sc->arpcom.ac_if; 3113 uint32_t staterr; 3114 emx_rxdesc_t *current_desc; 3115 struct mbuf *mp; 3116 int i, cpuid = mycpuid; 3117 3118 i = rdata->next_rx_desc_to_check; 3119 current_desc = &rdata->rx_desc[i]; 3120 staterr = le32toh(current_desc->rxd_staterr); 3121 3122 if (!(staterr & E1000_RXD_STAT_DD)) 3123 return; 3124 3125 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 3126 struct pktinfo *pi = NULL, pi0; 3127 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 3128 struct mbuf *m = NULL; 3129 int eop, len; 3130 3131 logif(pkt_receive); 3132 3133 mp = rx_buf->m_head; 3134 3135 /* 3136 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3137 * needs to access the last received byte in the mbuf. 3138 */ 3139 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 3140 BUS_DMASYNC_POSTREAD); 3141 3142 len = le16toh(current_desc->rxd_length); 3143 if (staterr & E1000_RXD_STAT_EOP) { 3144 count--; 3145 eop = 1; 3146 } else { 3147 eop = 0; 3148 } 3149 3150 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3151 uint16_t vlan = 0; 3152 uint32_t mrq, rss_hash; 3153 3154 /* 3155 * Save several necessary information, 3156 * before emx_newbuf() destroy it. 3157 */ 3158 if ((staterr & E1000_RXD_STAT_VP) && eop) 3159 vlan = le16toh(current_desc->rxd_vlan); 3160 3161 mrq = le32toh(current_desc->rxd_mrq); 3162 rss_hash = le32toh(current_desc->rxd_rss); 3163 3164 EMX_RSS_DPRINTF(rdata->sc, 10, 3165 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 3166 rdata->idx, mrq, rss_hash); 3167 3168 if (emx_newbuf(rdata, i, 0) != 0) { 3169 IFNET_STAT_INC(ifp, iqdrops, 1); 3170 goto discard; 3171 } 3172 3173 /* Assign correct length to the current fragment */ 3174 mp->m_len = len; 3175 3176 if (rdata->fmp == NULL) { 3177 mp->m_pkthdr.len = len; 3178 rdata->fmp = mp; /* Store the first mbuf */ 3179 rdata->lmp = mp; 3180 } else { 3181 /* 3182 * Chain mbuf's together 3183 */ 3184 rdata->lmp->m_next = mp; 3185 rdata->lmp = rdata->lmp->m_next; 3186 rdata->fmp->m_pkthdr.len += len; 3187 } 3188 3189 if (eop) { 3190 rdata->fmp->m_pkthdr.rcvif = ifp; 3191 IFNET_STAT_INC(ifp, ipackets, 1); 3192 3193 if (ifp->if_capenable & IFCAP_RXCSUM) 3194 emx_rxcsum(staterr, rdata->fmp); 3195 3196 if (staterr & E1000_RXD_STAT_VP) { 3197 rdata->fmp->m_pkthdr.ether_vlantag = 3198 vlan; 3199 rdata->fmp->m_flags |= M_VLANTAG; 3200 } 3201 m = rdata->fmp; 3202 rdata->fmp = NULL; 3203 rdata->lmp = NULL; 3204 3205 if (ifp->if_capenable & IFCAP_RSS) { 3206 pi = emx_rssinfo(m, &pi0, mrq, 3207 rss_hash, staterr); 3208 } 3209 #ifdef EMX_RSS_DEBUG 3210 rdata->rx_pkts++; 3211 #endif 3212 } 3213 } else { 3214 IFNET_STAT_INC(ifp, ierrors, 1); 3215 discard: 3216 emx_setup_rxdesc(current_desc, rx_buf); 3217 if (rdata->fmp != NULL) { 3218 m_freem(rdata->fmp); 3219 rdata->fmp = NULL; 3220 rdata->lmp = NULL; 3221 } 3222 m = NULL; 3223 } 3224 3225 if (m != NULL) 3226 ifp->if_input(ifp, m, pi, cpuid); 3227 3228 /* Advance our pointers to the next descriptor. */ 3229 if (++i == rdata->num_rx_desc) 3230 i = 0; 3231 3232 current_desc = &rdata->rx_desc[i]; 3233 staterr = le32toh(current_desc->rxd_staterr); 3234 } 3235 rdata->next_rx_desc_to_check = i; 3236 3237 /* Advance the E1000's Receive Queue "Tail Pointer". */ 3238 if (--i < 0) 3239 i = rdata->num_rx_desc - 1; 3240 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3241 } 3242 3243 static void 3244 emx_enable_intr(struct emx_softc *sc) 3245 { 3246 uint32_t ims_mask = IMS_ENABLE_MASK; 3247 3248 lwkt_serialize_handler_enable(&sc->main_serialize); 3249 3250 #if 0 3251 if (sc->hw.mac.type == e1000_82574) { 3252 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3253 ims_mask |= EM_MSIX_MASK; 3254 } 3255 #endif 3256 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3257 } 3258 3259 static void 3260 emx_disable_intr(struct emx_softc *sc) 3261 { 3262 if (sc->hw.mac.type == e1000_82574) 3263 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3264 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3265 3266 lwkt_serialize_handler_disable(&sc->main_serialize); 3267 } 3268 3269 /* 3270 * Bit of a misnomer, what this really means is 3271 * to enable OS management of the system... aka 3272 * to disable special hardware management features 3273 */ 3274 static void 3275 emx_get_mgmt(struct emx_softc *sc) 3276 { 3277 /* A shared code workaround */ 3278 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3279 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3280 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3281 3282 /* disable hardware interception of ARP */ 3283 manc &= ~(E1000_MANC_ARP_EN); 3284 3285 /* enable receiving management packets to the host */ 3286 manc |= E1000_MANC_EN_MNG2HOST; 3287 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3288 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3289 manc2h |= E1000_MNG2HOST_PORT_623; 3290 manc2h |= E1000_MNG2HOST_PORT_664; 3291 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3292 3293 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3294 } 3295 } 3296 3297 /* 3298 * Give control back to hardware management 3299 * controller if there is one. 3300 */ 3301 static void 3302 emx_rel_mgmt(struct emx_softc *sc) 3303 { 3304 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3305 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3306 3307 /* re-enable hardware interception of ARP */ 3308 manc |= E1000_MANC_ARP_EN; 3309 manc &= ~E1000_MANC_EN_MNG2HOST; 3310 3311 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3312 } 3313 } 3314 3315 /* 3316 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3317 * For ASF and Pass Through versions of f/w this means that 3318 * the driver is loaded. For AMT version (only with 82573) 3319 * of the f/w this means that the network i/f is open. 3320 */ 3321 static void 3322 emx_get_hw_control(struct emx_softc *sc) 3323 { 3324 /* Let firmware know the driver has taken over */ 3325 if (sc->hw.mac.type == e1000_82573) { 3326 uint32_t swsm; 3327 3328 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3329 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3330 swsm | E1000_SWSM_DRV_LOAD); 3331 } else { 3332 uint32_t ctrl_ext; 3333 3334 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3335 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3336 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3337 } 3338 sc->flags |= EMX_FLAG_HW_CTRL; 3339 } 3340 3341 /* 3342 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3343 * For ASF and Pass Through versions of f/w this means that the 3344 * driver is no longer loaded. For AMT version (only with 82573) 3345 * of the f/w this means that the network i/f is closed. 3346 */ 3347 static void 3348 emx_rel_hw_control(struct emx_softc *sc) 3349 { 3350 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0) 3351 return; 3352 sc->flags &= ~EMX_FLAG_HW_CTRL; 3353 3354 /* Let firmware taken over control of h/w */ 3355 if (sc->hw.mac.type == e1000_82573) { 3356 uint32_t swsm; 3357 3358 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3359 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3360 swsm & ~E1000_SWSM_DRV_LOAD); 3361 } else { 3362 uint32_t ctrl_ext; 3363 3364 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3365 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3366 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3367 } 3368 } 3369 3370 static int 3371 emx_is_valid_eaddr(const uint8_t *addr) 3372 { 3373 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3374 3375 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3376 return (FALSE); 3377 3378 return (TRUE); 3379 } 3380 3381 /* 3382 * Enable PCI Wake On Lan capability 3383 */ 3384 void 3385 emx_enable_wol(device_t dev) 3386 { 3387 uint16_t cap, status; 3388 uint8_t id; 3389 3390 /* First find the capabilities pointer*/ 3391 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3392 3393 /* Read the PM Capabilities */ 3394 id = pci_read_config(dev, cap, 1); 3395 if (id != PCIY_PMG) /* Something wrong */ 3396 return; 3397 3398 /* 3399 * OK, we have the power capabilities, 3400 * so now get the status register 3401 */ 3402 cap += PCIR_POWER_STATUS; 3403 status = pci_read_config(dev, cap, 2); 3404 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3405 pci_write_config(dev, cap, status, 2); 3406 } 3407 3408 static void 3409 emx_update_stats(struct emx_softc *sc) 3410 { 3411 struct ifnet *ifp = &sc->arpcom.ac_if; 3412 3413 if (sc->hw.phy.media_type == e1000_media_type_copper || 3414 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3415 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3416 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3417 } 3418 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3419 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3420 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3421 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3422 3423 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3424 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3425 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3426 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3427 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3428 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3429 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3430 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3431 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3432 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3433 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3434 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3435 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3436 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3437 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3438 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3439 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3440 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3441 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3442 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3443 3444 /* For the 64-bit byte counters the low dword must be read first. */ 3445 /* Both registers clear on the read of the high dword */ 3446 3447 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3448 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3449 3450 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3451 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3452 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3453 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3454 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3455 3456 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3457 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3458 3459 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3460 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3461 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3462 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3463 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3464 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3465 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3466 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3467 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3468 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3469 3470 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3471 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3472 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3473 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3474 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3475 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3476 3477 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3478 3479 /* Rx Errors */ 3480 IFNET_STAT_SET(ifp, ierrors, 3481 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3482 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3483 3484 /* Tx Errors */ 3485 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3486 } 3487 3488 static void 3489 emx_print_debug_info(struct emx_softc *sc) 3490 { 3491 device_t dev = sc->dev; 3492 uint8_t *hw_addr = sc->hw.hw_addr; 3493 int i; 3494 3495 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3496 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3497 E1000_READ_REG(&sc->hw, E1000_CTRL), 3498 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3499 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3500 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3501 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3502 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3503 sc->hw.fc.high_water, sc->hw.fc.low_water); 3504 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3505 E1000_READ_REG(&sc->hw, E1000_TIDV), 3506 E1000_READ_REG(&sc->hw, E1000_TADV)); 3507 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3508 E1000_READ_REG(&sc->hw, E1000_RDTR), 3509 E1000_READ_REG(&sc->hw, E1000_RADV)); 3510 3511 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3512 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3513 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3514 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3515 } 3516 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3517 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3518 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3519 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3520 } 3521 3522 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3523 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3524 sc->tx_data[i].num_tx_desc_avail); 3525 device_printf(dev, "TX %d TSO segments = %lu\n", i, 3526 sc->tx_data[i].tso_segments); 3527 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3528 sc->tx_data[i].tso_ctx_reused); 3529 } 3530 } 3531 3532 static void 3533 emx_print_hw_stats(struct emx_softc *sc) 3534 { 3535 device_t dev = sc->dev; 3536 3537 device_printf(dev, "Excessive collisions = %lld\n", 3538 (long long)sc->stats.ecol); 3539 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3540 device_printf(dev, "Symbol errors = %lld\n", 3541 (long long)sc->stats.symerrs); 3542 #endif 3543 device_printf(dev, "Sequence errors = %lld\n", 3544 (long long)sc->stats.sec); 3545 device_printf(dev, "Defer count = %lld\n", 3546 (long long)sc->stats.dc); 3547 device_printf(dev, "Missed Packets = %lld\n", 3548 (long long)sc->stats.mpc); 3549 device_printf(dev, "Receive No Buffers = %lld\n", 3550 (long long)sc->stats.rnbc); 3551 /* RLEC is inaccurate on some hardware, calculate our own. */ 3552 device_printf(dev, "Receive Length Errors = %lld\n", 3553 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3554 device_printf(dev, "Receive errors = %lld\n", 3555 (long long)sc->stats.rxerrc); 3556 device_printf(dev, "Crc errors = %lld\n", 3557 (long long)sc->stats.crcerrs); 3558 device_printf(dev, "Alignment errors = %lld\n", 3559 (long long)sc->stats.algnerrc); 3560 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3561 (long long)sc->stats.cexterr); 3562 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3563 device_printf(dev, "XON Rcvd = %lld\n", 3564 (long long)sc->stats.xonrxc); 3565 device_printf(dev, "XON Xmtd = %lld\n", 3566 (long long)sc->stats.xontxc); 3567 device_printf(dev, "XOFF Rcvd = %lld\n", 3568 (long long)sc->stats.xoffrxc); 3569 device_printf(dev, "XOFF Xmtd = %lld\n", 3570 (long long)sc->stats.xofftxc); 3571 device_printf(dev, "Good Packets Rcvd = %lld\n", 3572 (long long)sc->stats.gprc); 3573 device_printf(dev, "Good Packets Xmtd = %lld\n", 3574 (long long)sc->stats.gptc); 3575 } 3576 3577 static void 3578 emx_print_nvm_info(struct emx_softc *sc) 3579 { 3580 uint16_t eeprom_data; 3581 int i, j, row = 0; 3582 3583 /* Its a bit crude, but it gets the job done */ 3584 kprintf("\nInterface EEPROM Dump:\n"); 3585 kprintf("Offset\n0x0000 "); 3586 for (i = 0, j = 0; i < 32; i++, j++) { 3587 if (j == 8) { /* Make the offset block */ 3588 j = 0; ++row; 3589 kprintf("\n0x00%x0 ",row); 3590 } 3591 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3592 kprintf("%04x ", eeprom_data); 3593 } 3594 kprintf("\n"); 3595 } 3596 3597 static int 3598 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3599 { 3600 struct emx_softc *sc; 3601 struct ifnet *ifp; 3602 int error, result; 3603 3604 result = -1; 3605 error = sysctl_handle_int(oidp, &result, 0, req); 3606 if (error || !req->newptr) 3607 return (error); 3608 3609 sc = (struct emx_softc *)arg1; 3610 ifp = &sc->arpcom.ac_if; 3611 3612 ifnet_serialize_all(ifp); 3613 3614 if (result == 1) 3615 emx_print_debug_info(sc); 3616 3617 /* 3618 * This value will cause a hex dump of the 3619 * first 32 16-bit words of the EEPROM to 3620 * the screen. 3621 */ 3622 if (result == 2) 3623 emx_print_nvm_info(sc); 3624 3625 ifnet_deserialize_all(ifp); 3626 3627 return (error); 3628 } 3629 3630 static int 3631 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3632 { 3633 int error, result; 3634 3635 result = -1; 3636 error = sysctl_handle_int(oidp, &result, 0, req); 3637 if (error || !req->newptr) 3638 return (error); 3639 3640 if (result == 1) { 3641 struct emx_softc *sc = (struct emx_softc *)arg1; 3642 struct ifnet *ifp = &sc->arpcom.ac_if; 3643 3644 ifnet_serialize_all(ifp); 3645 emx_print_hw_stats(sc); 3646 ifnet_deserialize_all(ifp); 3647 } 3648 return (error); 3649 } 3650 3651 static void 3652 emx_add_sysctl(struct emx_softc *sc) 3653 { 3654 struct sysctl_ctx_list *ctx; 3655 struct sysctl_oid *tree; 3656 #if defined(EMX_RSS_DEBUG) || defined(EMX_TSS_DEBUG) 3657 char pkt_desc[32]; 3658 int i; 3659 #endif 3660 3661 ctx = device_get_sysctl_ctx(sc->dev); 3662 tree = device_get_sysctl_tree(sc->dev); 3663 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3664 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3665 emx_sysctl_debug_info, "I", "Debug Information"); 3666 3667 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3668 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3669 emx_sysctl_stats, "I", "Statistics"); 3670 3671 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3672 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0, 3673 "# of RX descs"); 3674 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3675 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0, 3676 "# of TX descs"); 3677 3678 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3679 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3680 emx_sysctl_int_throttle, "I", "interrupt throttling rate"); 3681 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3682 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3683 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt"); 3684 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3685 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3686 emx_sysctl_tx_wreg_nsegs, "I", 3687 "# segments sent before write to hardware register"); 3688 3689 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3690 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0, 3691 "# of RX rings"); 3692 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3693 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0, 3694 "# of TX rings"); 3695 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3696 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0, 3697 "# of TX rings used"); 3698 3699 #ifdef IFPOLL_ENABLE 3700 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3701 OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, 3702 sc, 0, emx_sysctl_npoll_rxoff, "I", 3703 "NPOLLING RX cpu offset"); 3704 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3705 OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, 3706 sc, 0, emx_sysctl_npoll_txoff, "I", 3707 "NPOLLING TX cpu offset"); 3708 #endif 3709 3710 #ifdef EMX_RSS_DEBUG 3711 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3712 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3713 0, "RSS debug level"); 3714 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3715 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i); 3716 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3717 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts, 3718 "RXed packets"); 3719 } 3720 #endif 3721 #ifdef EMX_TSS_DEBUG 3722 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3723 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i); 3724 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3725 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts, 3726 "TXed packets"); 3727 } 3728 #endif 3729 } 3730 3731 static int 3732 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3733 { 3734 struct emx_softc *sc = (void *)arg1; 3735 struct ifnet *ifp = &sc->arpcom.ac_if; 3736 int error, throttle; 3737 3738 throttle = sc->int_throttle_ceil; 3739 error = sysctl_handle_int(oidp, &throttle, 0, req); 3740 if (error || req->newptr == NULL) 3741 return error; 3742 if (throttle < 0 || throttle > 1000000000 / 256) 3743 return EINVAL; 3744 3745 if (throttle) { 3746 /* 3747 * Set the interrupt throttling rate in 256ns increments, 3748 * recalculate sysctl value assignment to get exact frequency. 3749 */ 3750 throttle = 1000000000 / 256 / throttle; 3751 3752 /* Upper 16bits of ITR is reserved and should be zero */ 3753 if (throttle & 0xffff0000) 3754 return EINVAL; 3755 } 3756 3757 ifnet_serialize_all(ifp); 3758 3759 if (throttle) 3760 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3761 else 3762 sc->int_throttle_ceil = 0; 3763 3764 if (ifp->if_flags & IFF_RUNNING) 3765 emx_set_itr(sc, throttle); 3766 3767 ifnet_deserialize_all(ifp); 3768 3769 if (bootverbose) { 3770 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3771 sc->int_throttle_ceil); 3772 } 3773 return 0; 3774 } 3775 3776 static int 3777 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3778 { 3779 struct emx_softc *sc = (void *)arg1; 3780 struct ifnet *ifp = &sc->arpcom.ac_if; 3781 struct emx_txdata *tdata = &sc->tx_data[0]; 3782 int error, segs; 3783 3784 segs = tdata->tx_intr_nsegs; 3785 error = sysctl_handle_int(oidp, &segs, 0, req); 3786 if (error || req->newptr == NULL) 3787 return error; 3788 if (segs <= 0) 3789 return EINVAL; 3790 3791 ifnet_serialize_all(ifp); 3792 3793 /* 3794 * Don't allow tx_intr_nsegs to become: 3795 * o Less the oact_tx_desc 3796 * o Too large that no TX desc will cause TX interrupt to 3797 * be generated (OACTIVE will never recover) 3798 * o Too small that will cause tx_dd[] overflow 3799 */ 3800 if (segs < tdata->oact_tx_desc || 3801 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 3802 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 3803 error = EINVAL; 3804 } else { 3805 int i; 3806 3807 error = 0; 3808 for (i = 0; i < sc->tx_ring_cnt; ++i) 3809 sc->tx_data[i].tx_intr_nsegs = segs; 3810 } 3811 3812 ifnet_deserialize_all(ifp); 3813 3814 return error; 3815 } 3816 3817 static int 3818 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3819 { 3820 struct emx_softc *sc = (void *)arg1; 3821 struct ifnet *ifp = &sc->arpcom.ac_if; 3822 int error, nsegs, i; 3823 3824 nsegs = sc->tx_data[0].tx_wreg_nsegs; 3825 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3826 if (error || req->newptr == NULL) 3827 return error; 3828 3829 ifnet_serialize_all(ifp); 3830 for (i = 0; i < sc->tx_ring_cnt; ++i) 3831 sc->tx_data[i].tx_wreg_nsegs =nsegs; 3832 ifnet_deserialize_all(ifp); 3833 3834 return 0; 3835 } 3836 3837 #ifdef IFPOLL_ENABLE 3838 3839 static int 3840 emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 3841 { 3842 struct emx_softc *sc = (void *)arg1; 3843 struct ifnet *ifp = &sc->arpcom.ac_if; 3844 int error, off; 3845 3846 off = sc->rx_npoll_off; 3847 error = sysctl_handle_int(oidp, &off, 0, req); 3848 if (error || req->newptr == NULL) 3849 return error; 3850 if (off < 0) 3851 return EINVAL; 3852 3853 ifnet_serialize_all(ifp); 3854 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) { 3855 error = EINVAL; 3856 } else { 3857 error = 0; 3858 sc->rx_npoll_off = off; 3859 } 3860 ifnet_deserialize_all(ifp); 3861 3862 return error; 3863 } 3864 3865 static int 3866 emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS) 3867 { 3868 struct emx_softc *sc = (void *)arg1; 3869 struct ifnet *ifp = &sc->arpcom.ac_if; 3870 int error, off; 3871 3872 off = sc->tx_npoll_off; 3873 error = sysctl_handle_int(oidp, &off, 0, req); 3874 if (error || req->newptr == NULL) 3875 return error; 3876 if (off < 0) 3877 return EINVAL; 3878 3879 ifnet_serialize_all(ifp); 3880 if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) { 3881 error = EINVAL; 3882 } else { 3883 error = 0; 3884 sc->tx_npoll_off = off; 3885 } 3886 ifnet_deserialize_all(ifp); 3887 3888 return error; 3889 } 3890 3891 #endif /* IFPOLL_ENABLE */ 3892 3893 static int 3894 emx_dma_alloc(struct emx_softc *sc) 3895 { 3896 int error, i; 3897 3898 /* 3899 * Create top level busdma tag 3900 */ 3901 error = bus_dma_tag_create(NULL, 1, 0, 3902 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3903 NULL, NULL, 3904 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 3905 0, &sc->parent_dtag); 3906 if (error) { 3907 device_printf(sc->dev, "could not create top level DMA tag\n"); 3908 return error; 3909 } 3910 3911 /* 3912 * Allocate transmit descriptors ring and buffers 3913 */ 3914 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3915 error = emx_create_tx_ring(&sc->tx_data[i]); 3916 if (error) { 3917 device_printf(sc->dev, 3918 "Could not setup transmit structures\n"); 3919 return error; 3920 } 3921 } 3922 3923 /* 3924 * Allocate receive descriptors ring and buffers 3925 */ 3926 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3927 error = emx_create_rx_ring(&sc->rx_data[i]); 3928 if (error) { 3929 device_printf(sc->dev, 3930 "Could not setup receive structures\n"); 3931 return error; 3932 } 3933 } 3934 return 0; 3935 } 3936 3937 static void 3938 emx_dma_free(struct emx_softc *sc) 3939 { 3940 int i; 3941 3942 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3943 emx_destroy_tx_ring(&sc->tx_data[i], 3944 sc->tx_data[i].num_tx_desc); 3945 } 3946 3947 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3948 emx_destroy_rx_ring(&sc->rx_data[i], 3949 sc->rx_data[i].num_rx_desc); 3950 } 3951 3952 /* Free top level busdma tag */ 3953 if (sc->parent_dtag != NULL) 3954 bus_dma_tag_destroy(sc->parent_dtag); 3955 } 3956 3957 static void 3958 emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3959 { 3960 struct emx_softc *sc = ifp->if_softc; 3961 3962 ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz); 3963 } 3964 3965 static void 3966 emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3967 { 3968 struct emx_softc *sc = ifp->if_softc; 3969 3970 ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz); 3971 } 3972 3973 static int 3974 emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3975 { 3976 struct emx_softc *sc = ifp->if_softc; 3977 3978 return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz); 3979 } 3980 3981 static void 3982 emx_serialize_skipmain(struct emx_softc *sc) 3983 { 3984 lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1); 3985 } 3986 3987 static void 3988 emx_deserialize_skipmain(struct emx_softc *sc) 3989 { 3990 lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1); 3991 } 3992 3993 #ifdef INVARIANTS 3994 3995 static void 3996 emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3997 boolean_t serialized) 3998 { 3999 struct emx_softc *sc = ifp->if_softc; 4000 4001 ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE, 4002 slz, serialized); 4003 } 4004 4005 #endif /* INVARIANTS */ 4006 4007 #ifdef IFPOLL_ENABLE 4008 4009 static void 4010 emx_npoll_status(struct ifnet *ifp) 4011 { 4012 struct emx_softc *sc = ifp->if_softc; 4013 uint32_t reg_icr; 4014 4015 ASSERT_SERIALIZED(&sc->main_serialize); 4016 4017 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 4018 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 4019 callout_stop(&sc->timer); 4020 sc->hw.mac.get_link_status = 1; 4021 emx_update_link_status(sc); 4022 callout_reset(&sc->timer, hz, emx_timer, sc); 4023 } 4024 } 4025 4026 static void 4027 emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 4028 { 4029 struct emx_txdata *tdata = arg; 4030 4031 ASSERT_SERIALIZED(&tdata->tx_serialize); 4032 4033 emx_txeof(tdata); 4034 if (!ifsq_is_empty(tdata->ifsq)) 4035 ifsq_devstart(tdata->ifsq); 4036 } 4037 4038 static void 4039 emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 4040 { 4041 struct emx_rxdata *rdata = arg; 4042 4043 ASSERT_SERIALIZED(&rdata->rx_serialize); 4044 4045 emx_rxeof(rdata, cycle); 4046 } 4047 4048 static void 4049 emx_npoll(struct ifnet *ifp, struct ifpoll_info *info) 4050 { 4051 struct emx_softc *sc = ifp->if_softc; 4052 int i, txr_cnt; 4053 4054 ASSERT_IFNET_SERIALIZED_ALL(ifp); 4055 4056 if (info) { 4057 int off; 4058 4059 info->ifpi_status.status_func = emx_npoll_status; 4060 info->ifpi_status.serializer = &sc->main_serialize; 4061 4062 txr_cnt = emx_get_txring_inuse(sc, TRUE); 4063 off = sc->tx_npoll_off; 4064 for (i = 0; i < txr_cnt; ++i) { 4065 struct emx_txdata *tdata = &sc->tx_data[i]; 4066 int idx = i + off; 4067 4068 KKASSERT(idx < ncpus2); 4069 info->ifpi_tx[idx].poll_func = emx_npoll_tx; 4070 info->ifpi_tx[idx].arg = tdata; 4071 info->ifpi_tx[idx].serializer = &tdata->tx_serialize; 4072 ifsq_set_cpuid(tdata->ifsq, idx); 4073 } 4074 4075 off = sc->rx_npoll_off; 4076 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4077 struct emx_rxdata *rdata = &sc->rx_data[i]; 4078 int idx = i + off; 4079 4080 KKASSERT(idx < ncpus2); 4081 info->ifpi_rx[idx].poll_func = emx_npoll_rx; 4082 info->ifpi_rx[idx].arg = rdata; 4083 info->ifpi_rx[idx].serializer = &rdata->rx_serialize; 4084 } 4085 4086 if (ifp->if_flags & IFF_RUNNING) { 4087 if (txr_cnt == sc->tx_ring_inuse) 4088 emx_disable_intr(sc); 4089 else 4090 emx_init(sc); 4091 } 4092 } else { 4093 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4094 struct emx_txdata *tdata = &sc->tx_data[i]; 4095 4096 ifsq_set_cpuid(tdata->ifsq, 4097 rman_get_cpuid(sc->intr_res)); 4098 } 4099 4100 if (ifp->if_flags & IFF_RUNNING) { 4101 txr_cnt = emx_get_txring_inuse(sc, FALSE); 4102 if (txr_cnt == sc->tx_ring_inuse) 4103 emx_enable_intr(sc); 4104 else 4105 emx_init(sc); 4106 } 4107 } 4108 } 4109 4110 #endif /* IFPOLL_ENABLE */ 4111 4112 static void 4113 emx_set_itr(struct emx_softc *sc, uint32_t itr) 4114 { 4115 E1000_WRITE_REG(&sc->hw, E1000_ITR, itr); 4116 if (sc->hw.mac.type == e1000_82574) { 4117 int i; 4118 4119 /* 4120 * When using MSIX interrupts we need to 4121 * throttle using the EITR register 4122 */ 4123 for (i = 0; i < 4; ++i) 4124 E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr); 4125 } 4126 } 4127 4128 /* 4129 * Disable the L0s, 82574L Errata #20 4130 */ 4131 static void 4132 emx_disable_aspm(struct emx_softc *sc) 4133 { 4134 uint16_t link_cap, link_ctrl, disable; 4135 uint8_t pcie_ptr, reg; 4136 device_t dev = sc->dev; 4137 4138 switch (sc->hw.mac.type) { 4139 case e1000_82571: 4140 case e1000_82572: 4141 case e1000_82573: 4142 /* 4143 * 82573 specification update 4144 * errata #8 disable L0s 4145 * errata #41 disable L1 4146 * 4147 * 82571/82572 specification update 4148 # errata #13 disable L1 4149 * errata #68 disable L0s 4150 */ 4151 disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1; 4152 break; 4153 4154 case e1000_82574: 4155 /* 4156 * 82574 specification update errata #20 4157 * 4158 * There is no need to disable L1 4159 */ 4160 disable = PCIEM_LNKCTL_ASPM_L0S; 4161 break; 4162 4163 default: 4164 return; 4165 } 4166 4167 pcie_ptr = pci_get_pciecap_ptr(dev); 4168 if (pcie_ptr == 0) 4169 return; 4170 4171 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2); 4172 if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0) 4173 return; 4174 4175 if (bootverbose) 4176 if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable); 4177 4178 reg = pcie_ptr + PCIER_LINKCTRL; 4179 link_ctrl = pci_read_config(dev, reg, 2); 4180 link_ctrl &= ~disable; 4181 pci_write_config(dev, reg, link_ctrl, 2); 4182 } 4183 4184 static int 4185 emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp) 4186 { 4187 int iphlen, hoff, thoff, ex = 0; 4188 struct mbuf *m; 4189 struct ip *ip; 4190 4191 m = *mp; 4192 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 4193 4194 iphlen = m->m_pkthdr.csum_iphlen; 4195 thoff = m->m_pkthdr.csum_thlen; 4196 hoff = m->m_pkthdr.csum_lhlen; 4197 4198 KASSERT(iphlen > 0, ("invalid ip hlen")); 4199 KASSERT(thoff > 0, ("invalid tcp hlen")); 4200 KASSERT(hoff > 0, ("invalid ether hlen")); 4201 4202 if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX) 4203 ex = 4; 4204 4205 if (m->m_len < hoff + iphlen + thoff + ex) { 4206 m = m_pullup(m, hoff + iphlen + thoff + ex); 4207 if (m == NULL) { 4208 *mp = NULL; 4209 return ENOBUFS; 4210 } 4211 *mp = m; 4212 } 4213 ip = mtodoff(m, struct ip *, hoff); 4214 ip->ip_len = 0; 4215 4216 return 0; 4217 } 4218 4219 static int 4220 emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp, 4221 uint32_t *txd_upper, uint32_t *txd_lower) 4222 { 4223 struct e1000_context_desc *TXD; 4224 int hoff, iphlen, thoff, hlen; 4225 int mss, pktlen, curr_txd; 4226 4227 #ifdef EMX_TSO_DEBUG 4228 tdata->tso_segments++; 4229 #endif 4230 4231 iphlen = mp->m_pkthdr.csum_iphlen; 4232 thoff = mp->m_pkthdr.csum_thlen; 4233 hoff = mp->m_pkthdr.csum_lhlen; 4234 mss = mp->m_pkthdr.tso_segsz; 4235 pktlen = mp->m_pkthdr.len; 4236 4237 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 4238 tdata->csum_flags == CSUM_TSO && 4239 tdata->csum_iphlen == iphlen && 4240 tdata->csum_lhlen == hoff && 4241 tdata->csum_thlen == thoff && 4242 tdata->csum_mss == mss && 4243 tdata->csum_pktlen == pktlen) { 4244 *txd_upper = tdata->csum_txd_upper; 4245 *txd_lower = tdata->csum_txd_lower; 4246 #ifdef EMX_TSO_DEBUG 4247 tdata->tso_ctx_reused++; 4248 #endif 4249 return 0; 4250 } 4251 hlen = hoff + iphlen + thoff; 4252 4253 /* 4254 * Setup a new TSO context. 4255 */ 4256 4257 curr_txd = tdata->next_avail_tx_desc; 4258 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 4259 4260 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 4261 E1000_TXD_DTYP_D | /* Data descr type */ 4262 E1000_TXD_CMD_TSE; /* Do TSE on this packet */ 4263 4264 /* IP and/or TCP header checksum calculation and insertion. */ 4265 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 4266 4267 /* 4268 * Start offset for header checksum calculation. 4269 * End offset for header checksum calculation. 4270 * Offset of place put the checksum. 4271 */ 4272 TXD->lower_setup.ip_fields.ipcss = hoff; 4273 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1); 4274 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum); 4275 4276 /* 4277 * Start offset for payload checksum calculation. 4278 * End offset for payload checksum calculation. 4279 * Offset of place to put the checksum. 4280 */ 4281 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen; 4282 TXD->upper_setup.tcp_fields.tucse = 0; 4283 TXD->upper_setup.tcp_fields.tucso = 4284 hoff + iphlen + offsetof(struct tcphdr, th_sum); 4285 4286 /* 4287 * Payload size per packet w/o any headers. 4288 * Length of all headers up to payload. 4289 */ 4290 TXD->tcp_seg_setup.fields.mss = htole16(mss); 4291 TXD->tcp_seg_setup.fields.hdr_len = hlen; 4292 TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS | 4293 E1000_TXD_CMD_DEXT | /* Extended descr */ 4294 E1000_TXD_CMD_TSE | /* TSE context */ 4295 E1000_TXD_CMD_IP | /* Do IP csum */ 4296 E1000_TXD_CMD_TCP | /* Do TCP checksum */ 4297 (pktlen - hlen)); /* Total len */ 4298 4299 /* Save the information for this TSO context */ 4300 tdata->csum_flags = CSUM_TSO; 4301 tdata->csum_lhlen = hoff; 4302 tdata->csum_iphlen = iphlen; 4303 tdata->csum_thlen = thoff; 4304 tdata->csum_mss = mss; 4305 tdata->csum_pktlen = pktlen; 4306 tdata->csum_txd_upper = *txd_upper; 4307 tdata->csum_txd_lower = *txd_lower; 4308 4309 if (++curr_txd == tdata->num_tx_desc) 4310 curr_txd = 0; 4311 4312 KKASSERT(tdata->num_tx_desc_avail > 0); 4313 tdata->num_tx_desc_avail--; 4314 4315 tdata->next_avail_tx_desc = curr_txd; 4316 return 1; 4317 } 4318 4319 static int 4320 emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling) 4321 { 4322 if (polling) 4323 return sc->tx_ring_cnt; 4324 else 4325 return 1; 4326 } 4327