/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),

	/* required last entry */
	EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous mode also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* The Ignore Checksum (IXSM) bit is set; leave the mbuf alone. */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
		    CSUM_PSEUDO_HDR |
		    CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}
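/*
 * Derive packet dispatch information from the RX descriptor's RSS
 * type (MRQ) and Toeplitz hash fields.  Returns NULL when the
 * descriptor carries no usable RSS type, in which case the caller
 * falls back to non-RSS processing.
 */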
static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
	    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}
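/*
 * Attach: map the register BAR, allocate the interrupt, initialize
 * the shared code, size the RX/TX rings, read the permanent MAC
 * address from the NVM and hook the driver into the network stack.
 */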
static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable, tx_ring_max;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	/*
	 * Setup RX rings
	 */
	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	/*
	 * Setup TX ring
	 */
	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);

	ifmedia_init(&sc->media, IFM_IMASK,
	    emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;

	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
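	/*
	 * Worked example (a sketch of the math above): a requested
	 * ceiling of 8000 interrupts/s becomes an ITR interval of
	 * 1000000000 / 256 / 8000 = 488 units of 256ns; recomputing
	 * int_throttle_ceil from that interval (1000000000 / 256 / 488
	 * ~= 8005) makes the sysctl report the rate the hardware will
	 * actually use.
	 */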
	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/* Calculate # of RX rings */
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING);

	/*
	 * Calculate # of TX rings
	 *
	 * XXX
	 * I217/I218 claims to have 2 TX queues
	 *
	 * NOTE:
	 * Don't enable multiple TX queues on 82574; it always gives
	 * watchdog timeout on TX queue0 when multiple TCP streams are
	 * received.  It was originally suspected that the hardware TX
	 * checksum offloading caused this watchdog timeout, since only
	 * TCP ACKs are sent during TCP receiving tests.  However, even
	 * with the hardware TX checksum offloading disabled, TX queue0
	 * still suffers from the watchdog timeout.
	 */
	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan)
		tx_ring_max = EMX_NTX_RING;
	sc->tx_ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, tx_ring_max);

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and the MAC address from it.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port sc, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	if (sc->tx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->tx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->tx_npoll_off = offset;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree; this must be done after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		error = emx_reset(sc);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(&sc->arpcom.ac_if);
			goto fail;
		}
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler\n");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}
	return (0);
fail:
	emx_detach(dev);
	return (error);
}

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
		    sc->flash);
	}

	emx_dma_free(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}
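/*
 * Transmit start: dequeue packets from the sub-queue, encapsulate
 * them into TX descriptors, and batch the TDT (tail) register write
 * until at least tx_wreg_nsegs segments have been queued, which cuts
 * down on expensive register accesses.
 */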
static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Do we have at least the minimal number of descriptors? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata);
			continue;
		}

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
}

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_pch_lpt:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
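/*
 * TX watchdog: if TDT equals TDH, every descriptor has been consumed
 * and the queue is simply idle; if the TXOFF status bit is set, the
 * MAC is paused by flow control and the timer is rearmed instead of
 * resetting.  Only otherwise is the hang treated as real and the
 * hardware reinitialized.
 */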
static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to EMX_TX_TIMEOUT every time start queues
	 * a packet.  Then txeof keeps resetting it as long as it cleans
	 * at least one descriptor.  Finally, anytime all descriptors
	 * are clean the timer is set to 0.
	 */

	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		tdata->tx_watchdog.wd_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * On the 82571, RAR[0] may be overwritten when the other port
	 * is reset, so keep a duplicate in the last RAR entry for that
	 * eventuality; this ensures the interface continues to function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_data[i].ifsq);
		ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog);
	}

	callout_reset(&sc->timer, hz, emx_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling)
		emx_disable_intr(sc);
	else
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);
}

static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle.  The ICR register
	 * reports all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(&sc->rx_data[i], -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			struct emx_txdata *tdata = &sc->tx_data[0];

			lwkt_serialize_enter(&tdata->tx_serialize);
			emx_txeof(tdata);
			if (!ifsq_is_empty(tdata->ifsq))
				ifsq_devstart(tdata->ifsq);
			lwkt_serialize_exit(&tdata->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	emx_init(sc);

	return (0);
}
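/*
 * Encapsulate an mbuf chain into TX descriptors.  Report Status (RS)
 * is requested only once every tx_intr_nsegs descriptors to limit TX
 * completion interrupts; emx_txeof() later scans the tx_dd[] ring for
 * the descriptors whose DD bit the hardware wrote back.
 */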
static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = emx_tso_pullup(tdata, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index: this descriptor will
	 * carry the index of the EOP descriptor, which is the only
	 * one that gets a DONE bit writeback.
	 */
	first = tdata->next_avail_tx_desc;
	tx_buffer = &tdata->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	tdata->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

	i = tdata->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &tdata->tx_buf[i];
		ctxd = &tdata->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == tdata->num_tx_desc)
			i = 0;
	}

	tdata->next_avail_tx_desc = i;

	KKASSERT(tdata->num_tx_desc_avail > nsegs);
	tdata->num_tx_desc_avail -= nsegs;

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
		tdata->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_intr_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		tdata->tx_dd[tdata->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
		KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Defer TDT updating, until enough descriptors are setup
	 */
	*idx = i;

#ifdef EMX_TSS_DEBUG
	tdata->tx_pkts++;
#endif

	return (0);
}

static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	reg_rctl &= ~E1000_RCTL_UPE;
	reg_rctl &= ~E1000_RCTL_MPE;
	reg_rctl &= ~E1000_RCTL_SBP;
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}
/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(&sc->main_serialize);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) &
		    E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571EB/82572EI
		 */
		if (sc->link_speed != SPEED_1000 &&
		    (hw->mac.type == e1000_82571 ||
		     hw->mac.type == e1000_82572)) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			device_printf(dev, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    ((sc->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		}
		sc->link_active = 1;
		sc->smartspeed = 0;
		ifp->if_baudrate = sc->link_speed * 1000000;
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		sc->link_active = 0;
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
emx_stop(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_clr_oactive(tdata->ifsq);
		ifsq_watchdog_stop(&tdata->tx_watchdog);
		tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;
	}

	/*
	 * Disable multiple receive queues.
	 *
	 * NOTE:
	 * We should disable multiple receive queues before
	 * resetting the hardware.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		emx_free_tx_ring(&sc->tx_data[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		emx_free_rx_ring(&sc->rx_data[i]);
}
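/*
 * Bring the MAC back to a known state: partition the packet buffer
 * between RX and TX, program the PAUSE frame watermarks derived from
 * the RX buffer size, then issue a global reset and reinitialize the
 * hardware.
 */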
static int
emx_reset(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	uint16_t rx_buffer_size;
	uint32_t pba;

	/* Set up smart power down as default off on newer adapters. */
	if (!emx_smart_pwr_down &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (sc->hw.mac.type) {
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;

	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;

	case e1000_82574:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;

	case e1000_pch_lpt:
		pba = E1000_PBA_26K;
		break;

	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (sc->hw.mac.max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high
	 *   water mark.  This allows the receiver to restart by sending
	 *   XON when it has drained a bit.  Here we use an arbitrary
	 *   value of 1500 which will restart after one full frame is
	 *   pulled from the buffer.  There could be several smaller
	 *   frames in the buffer and if so they will not trigger the
	 *   XON until their total number reduces the buffer by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;

	sc->hw.fc.high_water = rx_buffer_size -
	    roundup2(sc->hw.mac.max_frame_size, 1024);
	sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;

	sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.requested_mode = e1000_fc_full;

	/*
	 * Device specific overrides/settings
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->hw.fc.high_water = 0x5C20;
		sc->hw.fc.low_water = 0x5048;
		sc->hw.fc.pause_time = 0x0650;
		sc->hw.fc.refresh_time = 0x0400;
		/* Jumbos need adjusted PBA */
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU)
			E1000_WRITE_REG(&sc->hw, E1000_PBA, 12);
		else
			E1000_WRITE_REG(&sc->hw, E1000_PBA, 26);
	} else if (sc->hw.mac.type == e1000_80003es2lan) {
		sc->hw.fc.pause_time = 0xFFFF;
	}

	/* Issue a global reset */
	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
	emx_disable_aspm(sc);

	if (e1000_init_hw(&sc->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(&sc->hw);
	e1000_check_for_link(&sc->hw);

	return (0);
}
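/*
 * Worked example (a sketch; assumes the usual e1000 convention that
 * the PBA register counts the RX allocation in KB): reading back
 * E1000_PBA_32K gives 32, so rx_buffer_size = 32 << 10 = 32768 bytes,
 * and with a 1518-byte max frame the high water mark becomes
 * 32768 - roundup2(1518, 1024) = 30720 bytes.
 */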
static void
emx_setup_ifp(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	if_initname(ifp, device_get_name(sc->dev),
	    device_get_unit(sc->dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = emx_init;
	ifp->if_ioctl = emx_ioctl;
	ifp->if_start = emx_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = emx_npoll;
#endif
	ifp->if_serialize = emx_serialize;
	ifp->if_deserialize = emx_deserialize;
	ifp->if_tryserialize = emx_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = emx_serialize_assert;
#endif

	ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_mask;
	ifq_set_subq_mask(&ifp->if_snd, 0);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU |
	    IFCAP_TSO;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res));
		ifsq_set_priv(ifsq, tdata);
		ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize);
		tdata->ifsq = ifsq;

		ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog);
	}

	/*
	 * Specify the media types supported by this sc and register
	 * callbacks to update media and link information
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}
2035 */ 2036 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2037 2038 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2039 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2040 struct emx_txdata *tdata = &sc->tx_data[i]; 2041 2042 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2043 ifsq_set_priv(ifsq, tdata); 2044 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2045 tdata->ifsq = ifsq; 2046 2047 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2048 } 2049 2050 /* 2051 * Specify the media types supported by this sc and register 2052 * callbacks to update media and link information 2053 */ 2054 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2055 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2056 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2057 0, NULL); 2058 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 2059 } else { 2060 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2061 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2062 0, NULL); 2063 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2064 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2065 0, NULL); 2066 if (sc->hw.phy.type != e1000_phy_ife) { 2067 ifmedia_add(&sc->media, 2068 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2069 ifmedia_add(&sc->media, 2070 IFM_ETHER | IFM_1000_T, 0, NULL); 2071 } 2072 } 2073 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2074 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 2075 } 2076 2077 /* 2078 * Workaround for SmartSpeed on 82541 and 82547 controllers 2079 */ 2080 static void 2081 emx_smartspeed(struct emx_softc *sc) 2082 { 2083 uint16_t phy_tmp; 2084 2085 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2086 sc->hw.mac.autoneg == 0 || 2087 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2088 return; 2089 2090 if (sc->smartspeed == 0) { 2091 /* 2092 * If Master/Slave config fault is asserted twice, 2093 * we assume back-to-back 2094 */ 2095 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2096 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2097 return; 2098 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2099 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2100 e1000_read_phy_reg(&sc->hw, 2101 PHY_1000T_CTRL, &phy_tmp); 2102 if (phy_tmp & CR_1000T_MS_ENABLE) { 2103 phy_tmp &= ~CR_1000T_MS_ENABLE; 2104 e1000_write_phy_reg(&sc->hw, 2105 PHY_1000T_CTRL, phy_tmp); 2106 sc->smartspeed++; 2107 if (sc->hw.mac.autoneg && 2108 !e1000_phy_setup_autoneg(&sc->hw) && 2109 !e1000_read_phy_reg(&sc->hw, 2110 PHY_CONTROL, &phy_tmp)) { 2111 phy_tmp |= MII_CR_AUTO_NEG_EN | 2112 MII_CR_RESTART_AUTO_NEG; 2113 e1000_write_phy_reg(&sc->hw, 2114 PHY_CONTROL, phy_tmp); 2115 } 2116 } 2117 } 2118 return; 2119 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2120 /* If still no link, perhaps using 2/3 pair cable */ 2121 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2122 phy_tmp |= CR_1000T_MS_ENABLE; 2123 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2124 if (sc->hw.mac.autoneg && 2125 !e1000_phy_setup_autoneg(&sc->hw) && 2126 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2127 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2128 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2129 } 2130 } 2131 2132 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2133 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2134 sc->smartspeed = 0; 2135 } 2136 2137 static int 2138 emx_create_tx_ring(struct emx_txdata *tdata) 2139 { 2140 device_t 
dev = tdata->sc->dev;
2141 struct emx_txbuf *tx_buffer;
2142 int error, i, tsize, ntxd;
2143
2144 /*
2145 * Validate number of transmit descriptors. It must not exceed
2146 * the hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
2147 */
2148 ntxd = device_getenv_int(dev, "txd", emx_txd);
2149 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 ||
2150 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) {
2151 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
2152 EMX_DEFAULT_TXD, ntxd);
2153 tdata->num_tx_desc = EMX_DEFAULT_TXD;
2154 } else {
2155 tdata->num_tx_desc = ntxd;
2156 }
2157
2158 /*
2159 * Allocate Transmit Descriptor ring
2160 */
2161 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc),
2162 EMX_DBA_ALIGN);
2163 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag,
2164 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
2165 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap,
2166 &tdata->tx_desc_paddr);
2167 if (tdata->tx_desc_base == NULL) {
2168 device_printf(dev, "Unable to allocate tx_desc memory\n");
2169 return ENOMEM;
2170 }
2171
2172 tsize = __VM_CACHELINE_ALIGN(
2173 sizeof(struct emx_txbuf) * tdata->num_tx_desc);
2174 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);
2175
2176 /*
2177 * Create DMA tags for tx buffers
2178 */
2179 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */
2180 1, 0, /* alignment, bounds */
2181 BUS_SPACE_MAXADDR, /* lowaddr */
2182 BUS_SPACE_MAXADDR, /* highaddr */
2183 NULL, NULL, /* filter, filterarg */
2184 EMX_TSO_SIZE, /* maxsize */
2185 EMX_MAX_SCATTER, /* nsegments */
2186 EMX_MAX_SEGSIZE, /* maxsegsize */
2187 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2188 BUS_DMA_ONEBPAGE, /* flags */
2189 &tdata->txtag);
2190 if (error) {
2191 device_printf(dev, "Unable to allocate TX DMA tag\n");
2192 kfree(tdata->tx_buf, M_DEVBUF);
2193 tdata->tx_buf = NULL;
2194 return error;
2195 }
2196
2197 /*
2198 * Create DMA maps for tx buffers
2199 */
2200 for (i = 0; i < tdata->num_tx_desc; i++) {
2201 tx_buffer = &tdata->tx_buf[i];
2202
2203 error = bus_dmamap_create(tdata->txtag,
2204 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2205 &tx_buffer->map);
2206 if (error) {
2207 device_printf(dev, "Unable to create TX DMA map\n");
2208 emx_destroy_tx_ring(tdata, i);
2209 return error;
2210 }
2211 }
2212
2213 /*
2214 * Setup TX parameters
2215 */
2216 tdata->spare_tx_desc = EMX_TX_SPARE;
2217 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG;
2218
2219 /*
2220 * Keep the following relationship between spare_tx_desc, oact_tx_desc
2221 * and tx_intr_nsegs:
2222 * (spare_tx_desc + EMX_TX_RESERVED) <=
2223 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs
2224 */
2225 tdata->oact_tx_desc = tdata->num_tx_desc / 8;
2226 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX)
2227 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX;
2228 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED)
2229 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED;
2230
2231 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16;
2232 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc)
2233 tdata->tx_intr_nsegs = tdata->oact_tx_desc;
2234
2235 /*
2236 * Pull up an extra 4 bytes into the first data segment; see:
2237 * 82571/82572 specification update errata #7
2238 *
2239 * NOTE:
2240 * 4 bytes, instead of the 2 bytes mentioned in the errata,
2241 * are pulled, mainly to keep the rest of the data properly aligned.
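* For illustration (header sizes assumed, not taken from the errata):
* with a 14 byte Ethernet header, a 20 byte IP header and a 20 byte
* TCP header, the pullup gathers 14 + 20 + 20 + 4 = 58 bytes into the
* first segment, so the payload left in the chain stays 4-byte aligned.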
2242 */ 2243 if (tdata->sc->hw.mac.type == e1000_82571 || 2244 tdata->sc->hw.mac.type == e1000_82572) 2245 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2246 2247 return (0); 2248 } 2249 2250 static void 2251 emx_init_tx_ring(struct emx_txdata *tdata) 2252 { 2253 /* Clear the old ring contents */ 2254 bzero(tdata->tx_desc_base, 2255 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2256 2257 /* Reset state */ 2258 tdata->next_avail_tx_desc = 0; 2259 tdata->next_tx_to_clean = 0; 2260 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2261 2262 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2263 if (tdata->sc->tx_ring_inuse > 1) { 2264 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2265 if (bootverbose) { 2266 if_printf(&tdata->sc->arpcom.ac_if, 2267 "TX %d force ctx setup\n", tdata->idx); 2268 } 2269 } 2270 } 2271 2272 static void 2273 emx_init_tx_unit(struct emx_softc *sc) 2274 { 2275 uint32_t tctl, tarc, tipg = 0; 2276 int i; 2277 2278 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2279 struct emx_txdata *tdata = &sc->tx_data[i]; 2280 uint64_t bus_addr; 2281 2282 /* Setup the Base and Length of the Tx Descriptor Ring */ 2283 bus_addr = tdata->tx_desc_paddr; 2284 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2285 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2286 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2287 (uint32_t)(bus_addr >> 32)); 2288 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2289 (uint32_t)bus_addr); 2290 /* Setup the HW Tx Head and Tail descriptor pointers */ 2291 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2292 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2293 } 2294 2295 /* Set the default values for the Tx Inter Packet Gap timer */ 2296 switch (sc->hw.mac.type) { 2297 case e1000_80003es2lan: 2298 tipg = DEFAULT_82543_TIPG_IPGR1; 2299 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2300 E1000_TIPG_IPGR2_SHIFT; 2301 break; 2302 2303 default: 2304 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2305 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2306 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2307 else 2308 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2309 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2310 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2311 break; 2312 } 2313 2314 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2315 2316 /* NOTE: 0 is not allowed for TIDV */ 2317 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2318 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2319 2320 if (sc->hw.mac.type == e1000_82571 || 2321 sc->hw.mac.type == e1000_82572) { 2322 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2323 tarc |= EMX_TARC_SPEED_MODE; 2324 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2325 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2326 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2327 tarc |= 1; 2328 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2329 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2330 tarc |= 1; 2331 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2332 } 2333 2334 /* Program the Transmit Control Register */ 2335 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2336 tctl &= ~E1000_TCTL_CT; 2337 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2338 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2339 tctl |= E1000_TCTL_MULR; 2340 2341 /* This write will effectively turn on the transmit unit. 
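* (Assuming the stock e1000 definitions, E1000_COLLISION_THRESHOLD is
* 15 and E1000_CT_SHIFT is 4, so the CT field written below is
* 15 << 4 = 0xf0, alongside EN, PSP, RTLC and MULR.)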
*/
2342 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl);
2343
2344 if (sc->hw.mac.type == e1000_82571 ||
2345 sc->hw.mac.type == e1000_82572 ||
2346 sc->hw.mac.type == e1000_80003es2lan) {
2347 /* Bit 28 of TARC1 must be cleared when MULR is enabled */
2348 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2349 tarc &= ~(1 << 28);
2350 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2351 }
2352
2353 if (sc->tx_ring_inuse > 1) {
2354 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2355 tarc &= ~EMX_TARC_COUNT_MASK;
2356 tarc |= 1;
2357 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2358
2359 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2360 tarc &= ~EMX_TARC_COUNT_MASK;
2361 tarc |= 1;
2362 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2363 }
2364 }
2365
2366 static void
2367 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc)
2368 {
2369 struct emx_txbuf *tx_buffer;
2370 int i;
2371
2372 /* Free Transmit Descriptor ring */
2373 if (tdata->tx_desc_base) {
2374 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap);
2375 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base,
2376 tdata->tx_desc_dmap);
2377 bus_dma_tag_destroy(tdata->tx_desc_dtag);
2378
2379 tdata->tx_desc_base = NULL;
2380 }
2381
2382 if (tdata->tx_buf == NULL)
2383 return;
2384
2385 for (i = 0; i < ndesc; i++) {
2386 tx_buffer = &tdata->tx_buf[i];
2387
2388 KKASSERT(tx_buffer->m_head == NULL);
2389 bus_dmamap_destroy(tdata->txtag, tx_buffer->map);
2390 }
2391 bus_dma_tag_destroy(tdata->txtag);
2392
2393 kfree(tdata->tx_buf, M_DEVBUF);
2394 tdata->tx_buf = NULL;
2395 }
2396
2397 /*
2398 * The offload context needs to be set when we transfer the first
2399 * packet of a particular protocol (TCP/UDP). This routine has been
2400 * enhanced to deal with inserted VLAN headers.
2401 *
2402 * If the new packet's ether header length, ip header length and
2403 * csum offloading type are the same as the previous packet's, we
2404 * should avoid allocating a new csum context descriptor, mainly to
2405 * take advantage of the pipeline effect of the TX data read request.
2406 *
2407 * This function returns the number of TX descriptors allocated for
2408 * csum context.
2409 */
2410 static int
2411 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
2412 uint32_t *txd_upper, uint32_t *txd_lower)
2413 {
2414 struct e1000_context_desc *TXD;
2415 int curr_txd, ehdrlen, csum_flags;
2416 uint32_t cmd, hdr_len, ip_hlen;
2417
2418 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
2419 ip_hlen = mp->m_pkthdr.csum_iphlen;
2420 ehdrlen = mp->m_pkthdr.csum_lhlen;
2421
2422 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
2423 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
2424 tdata->csum_flags == csum_flags) {
2425 /*
2426 * Same csum offload context as the previous packets;
2427 * just return.
2428 */
2429 *txd_upper = tdata->csum_txd_upper;
2430 *txd_lower = tdata->csum_txd_lower;
2431 return 0;
2432 }
2433
2434 /*
2435 * Setup a new csum offload context.
2436 */
2437
2438 curr_txd = tdata->next_avail_tx_desc;
2439 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
2440
2441 cmd = 0;
2442
2443 /* Setup of IP header checksum. */
2444 if (csum_flags & CSUM_IP) {
2445 /*
2446 * Start offset for header checksum calculation.
2447 * End offset for header checksum calculation.
2448 * Offset of place to put the checksum.
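* For example (an untagged IPv4 frame assumed): with ehdrlen = 14 and
* ip_hlen = 20, ipcss = 14, ipcse = 14 + 20 - 1 = 33, and since
* offsetof(struct ip, ip_sum) is 10, ipcso = 14 + 10 = 24.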
2449 */ 2450 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2451 TXD->lower_setup.ip_fields.ipcse = 2452 htole16(ehdrlen + ip_hlen - 1); 2453 TXD->lower_setup.ip_fields.ipcso = 2454 ehdrlen + offsetof(struct ip, ip_sum); 2455 cmd |= E1000_TXD_CMD_IP; 2456 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2457 } 2458 hdr_len = ehdrlen + ip_hlen; 2459 2460 if (csum_flags & CSUM_TCP) { 2461 /* 2462 * Start offset for payload checksum calculation. 2463 * End offset for payload checksum calculation. 2464 * Offset of place to put the checksum. 2465 */ 2466 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2467 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2468 TXD->upper_setup.tcp_fields.tucso = 2469 hdr_len + offsetof(struct tcphdr, th_sum); 2470 cmd |= E1000_TXD_CMD_TCP; 2471 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2472 } else if (csum_flags & CSUM_UDP) { 2473 /* 2474 * Start offset for header checksum calculation. 2475 * End offset for header checksum calculation. 2476 * Offset of place to put the checksum. 2477 */ 2478 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2479 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2480 TXD->upper_setup.tcp_fields.tucso = 2481 hdr_len + offsetof(struct udphdr, uh_sum); 2482 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2483 } 2484 2485 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2486 E1000_TXD_DTYP_D; /* Data descr */ 2487 2488 /* Save the information for this csum offloading context */ 2489 tdata->csum_lhlen = ehdrlen; 2490 tdata->csum_iphlen = ip_hlen; 2491 tdata->csum_flags = csum_flags; 2492 tdata->csum_txd_upper = *txd_upper; 2493 tdata->csum_txd_lower = *txd_lower; 2494 2495 TXD->tcp_seg_setup.data = htole32(0); 2496 TXD->cmd_and_length = 2497 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2498 2499 if (++curr_txd == tdata->num_tx_desc) 2500 curr_txd = 0; 2501 2502 KKASSERT(tdata->num_tx_desc_avail > 0); 2503 tdata->num_tx_desc_avail--; 2504 2505 tdata->next_avail_tx_desc = curr_txd; 2506 return 1; 2507 } 2508 2509 static void 2510 emx_txeof(struct emx_txdata *tdata) 2511 { 2512 struct ifnet *ifp = &tdata->sc->arpcom.ac_if; 2513 struct emx_txbuf *tx_buffer; 2514 int first, num_avail; 2515 2516 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2517 return; 2518 2519 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2520 return; 2521 2522 num_avail = tdata->num_tx_desc_avail; 2523 first = tdata->next_tx_to_clean; 2524 2525 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2526 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2527 struct e1000_tx_desc *tx_desc; 2528 2529 tx_desc = &tdata->tx_desc_base[dd_idx]; 2530 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2531 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2532 2533 if (++dd_idx == tdata->num_tx_desc) 2534 dd_idx = 0; 2535 2536 while (first != dd_idx) { 2537 logif(pkt_txclean); 2538 2539 num_avail++; 2540 2541 tx_buffer = &tdata->tx_buf[first]; 2542 if (tx_buffer->m_head) { 2543 IFNET_STAT_INC(ifp, opackets, 1); 2544 bus_dmamap_unload(tdata->txtag, 2545 tx_buffer->map); 2546 m_freem(tx_buffer->m_head); 2547 tx_buffer->m_head = NULL; 2548 } 2549 2550 if (++first == tdata->num_tx_desc) 2551 first = 0; 2552 } 2553 } else { 2554 break; 2555 } 2556 } 2557 tdata->next_tx_to_clean = first; 2558 tdata->num_tx_desc_avail = num_avail; 2559 2560 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2561 tdata->tx_dd_head = 0; 2562 tdata->tx_dd_tail = 0; 2563 } 2564 2565 if (!EMX_IS_OACTIVE(tdata)) { 2566 ifsq_clr_oactive(tdata->ifsq); 2567 2568 /* All clean, turn off the timer */ 2569 if (tdata->num_tx_desc_avail == 
tdata->num_tx_desc)
2570 tdata->tx_watchdog.wd_timer = 0;
2571 }
2572 }
2573
2574 static void
2575 emx_tx_collect(struct emx_txdata *tdata)
2576 {
2577 struct ifnet *ifp = &tdata->sc->arpcom.ac_if;
2578 struct emx_txbuf *tx_buffer;
2579 int tdh, first, num_avail, dd_idx = -1;
2580
2581 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2582 return;
2583
2584 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx));
2585 if (tdh == tdata->next_tx_to_clean)
2586 return;
2587
2588 if (tdata->tx_dd_head != tdata->tx_dd_tail)
2589 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2590
2591 num_avail = tdata->num_tx_desc_avail;
2592 first = tdata->next_tx_to_clean;
2593
2594 while (first != tdh) {
2595 logif(pkt_txclean);
2596
2597 num_avail++;
2598
2599 tx_buffer = &tdata->tx_buf[first];
2600 if (tx_buffer->m_head) {
2601 IFNET_STAT_INC(ifp, opackets, 1);
2602 bus_dmamap_unload(tdata->txtag,
2603 tx_buffer->map);
2604 m_freem(tx_buffer->m_head);
2605 tx_buffer->m_head = NULL;
2606 }
2607
2608 if (first == dd_idx) {
2609 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2610 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2611 tdata->tx_dd_head = 0;
2612 tdata->tx_dd_tail = 0;
2613 dd_idx = -1;
2614 } else {
2615 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2616 }
2617 }
2618
2619 if (++first == tdata->num_tx_desc)
2620 first = 0;
2621 }
2622 tdata->next_tx_to_clean = first;
2623 tdata->num_tx_desc_avail = num_avail;
2624
2625 if (!EMX_IS_OACTIVE(tdata)) {
2626 ifsq_clr_oactive(tdata->ifsq);
2627
2628 /* All clean, turn off the timer */
2629 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2630 tdata->tx_watchdog.wd_timer = 0;
2631 }
2632 }
2633
2634 /*
2635 * When the link is lost, sometimes there is still work in the TX ring
2636 * which would result in a watchdog; rather than allow that, do an
2637 * attempted cleanup and then reinit here. Note that this has been
2638 * seen mostly with fiber adapters.
2639 */
2640 static void
2641 emx_tx_purge(struct emx_softc *sc)
2642 {
2643 int i;
2644
2645 if (sc->link_active)
2646 return;
2647
2648 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2649 struct emx_txdata *tdata = &sc->tx_data[i];
2650
2651 if (tdata->tx_watchdog.wd_timer) {
2652 emx_tx_collect(tdata);
2653 if (tdata->tx_watchdog.wd_timer) {
2654 if_printf(&sc->arpcom.ac_if,
2655 "Link lost, TX pending, reinit\n");
2656 emx_init(sc);
2657 return;
2658 }
2659 }
2660 }
2661 }
2662
2663 static int
2664 emx_newbuf(struct emx_rxdata *rdata, int i, int init)
2665 {
2666 struct mbuf *m;
2667 bus_dma_segment_t seg;
2668 bus_dmamap_t map;
2669 struct emx_rxbuf *rx_buffer;
2670 int error, nseg;
2671
2672 m = m_getcl(init ?
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2673 if (m == NULL) { 2674 if (init) { 2675 if_printf(&rdata->sc->arpcom.ac_if, 2676 "Unable to allocate RX mbuf\n"); 2677 } 2678 return (ENOBUFS); 2679 } 2680 m->m_len = m->m_pkthdr.len = MCLBYTES; 2681 2682 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 2683 m_adj(m, ETHER_ALIGN); 2684 2685 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2686 rdata->rx_sparemap, m, 2687 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2688 if (error) { 2689 m_freem(m); 2690 if (init) { 2691 if_printf(&rdata->sc->arpcom.ac_if, 2692 "Unable to load RX mbuf\n"); 2693 } 2694 return (error); 2695 } 2696 2697 rx_buffer = &rdata->rx_buf[i]; 2698 if (rx_buffer->m_head != NULL) 2699 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2700 2701 map = rx_buffer->map; 2702 rx_buffer->map = rdata->rx_sparemap; 2703 rdata->rx_sparemap = map; 2704 2705 rx_buffer->m_head = m; 2706 rx_buffer->paddr = seg.ds_addr; 2707 2708 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2709 return (0); 2710 } 2711 2712 static int 2713 emx_create_rx_ring(struct emx_rxdata *rdata) 2714 { 2715 device_t dev = rdata->sc->dev; 2716 struct emx_rxbuf *rx_buffer; 2717 int i, error, rsize, nrxd; 2718 2719 /* 2720 * Validate number of receive descriptors. It must not exceed 2721 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2722 */ 2723 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2724 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2725 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2726 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2727 EMX_DEFAULT_RXD, nrxd); 2728 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2729 } else { 2730 rdata->num_rx_desc = nrxd; 2731 } 2732 2733 /* 2734 * Allocate Receive Descriptor ring 2735 */ 2736 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2737 EMX_DBA_ALIGN); 2738 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2739 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2740 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2741 &rdata->rx_desc_paddr); 2742 if (rdata->rx_desc == NULL) { 2743 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2744 return ENOMEM; 2745 } 2746 2747 rsize = __VM_CACHELINE_ALIGN( 2748 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2749 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2750 2751 /* 2752 * Create DMA tag for rx buffers 2753 */ 2754 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2755 1, 0, /* alignment, bounds */ 2756 BUS_SPACE_MAXADDR, /* lowaddr */ 2757 BUS_SPACE_MAXADDR, /* highaddr */ 2758 NULL, NULL, /* filter, filterarg */ 2759 MCLBYTES, /* maxsize */ 2760 1, /* nsegments */ 2761 MCLBYTES, /* maxsegsize */ 2762 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2763 &rdata->rxtag); 2764 if (error) { 2765 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2766 kfree(rdata->rx_buf, M_DEVBUF); 2767 rdata->rx_buf = NULL; 2768 return error; 2769 } 2770 2771 /* 2772 * Create spare DMA map for rx buffers 2773 */ 2774 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2775 &rdata->rx_sparemap); 2776 if (error) { 2777 device_printf(dev, "Unable to create spare RX DMA map\n"); 2778 bus_dma_tag_destroy(rdata->rxtag); 2779 kfree(rdata->rx_buf, M_DEVBUF); 2780 rdata->rx_buf = NULL; 2781 return error; 2782 } 2783 2784 /* 2785 * Create DMA maps for rx buffers 2786 */ 2787 for (i = 0; i < rdata->num_rx_desc; i++) { 2788 rx_buffer = &rdata->rx_buf[i]; 2789 2790 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2791 &rx_buffer->map); 2792 
if (error) {
2793 device_printf(dev, "Unable to create RX DMA map\n");
2794 emx_destroy_rx_ring(rdata, i);
2795 return error;
2796 }
2797 }
2798 return (0);
2799 }
2800
2801 static void
2802 emx_free_rx_ring(struct emx_rxdata *rdata)
2803 {
2804 int i;
2805
2806 for (i = 0; i < rdata->num_rx_desc; i++) {
2807 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i];
2808
2809 if (rx_buffer->m_head != NULL) {
2810 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2811 m_freem(rx_buffer->m_head);
2812 rx_buffer->m_head = NULL;
2813 }
2814 }
2815
2816 if (rdata->fmp != NULL)
2817 m_freem(rdata->fmp);
2818 rdata->fmp = NULL;
2819 rdata->lmp = NULL;
2820 }
2821
2822 static void
2823 emx_free_tx_ring(struct emx_txdata *tdata)
2824 {
2825 int i;
2826
2827 for (i = 0; i < tdata->num_tx_desc; i++) {
2828 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i];
2829
2830 if (tx_buffer->m_head != NULL) {
2831 bus_dmamap_unload(tdata->txtag, tx_buffer->map);
2832 m_freem(tx_buffer->m_head);
2833 tx_buffer->m_head = NULL;
2834 }
2835 }
2836
2837 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX;
2838
2839 tdata->csum_flags = 0;
2840 tdata->csum_lhlen = 0;
2841 tdata->csum_iphlen = 0;
2842 tdata->csum_thlen = 0;
2843 tdata->csum_mss = 0;
2844 tdata->csum_pktlen = 0;
2845
2846 tdata->tx_dd_head = 0;
2847 tdata->tx_dd_tail = 0;
2848 tdata->tx_nsegs = 0;
2849 }
2850
2851 static int
2852 emx_init_rx_ring(struct emx_rxdata *rdata)
2853 {
2854 int i, error;
2855
2856 /* Reset descriptor ring */
2857 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);
2858
2859 /* Allocate new ones. */
2860 for (i = 0; i < rdata->num_rx_desc; i++) {
2861 error = emx_newbuf(rdata, i, 1);
2862 if (error)
2863 return (error);
2864 }
2865
2866 /* Setup our descriptor pointers */
2867 rdata->next_rx_desc_to_check = 0;
2868
2869 return (0);
2870 }
2871
2872 static void
2873 emx_init_rx_unit(struct emx_softc *sc)
2874 {
2875 struct ifnet *ifp = &sc->arpcom.ac_if;
2876 uint64_t bus_addr;
2877 uint32_t rctl, itr, rfctl;
2878 int i;
2879
2880 /*
2881 * Make sure receives are disabled while setting
2882 * up the descriptor ring
2883 */
2884 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
2885 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2886
2887 /*
2888 * Set the interrupt throttling rate. Value is calculated
2889 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
2890 */
2891 if (sc->int_throttle_ceil)
2892 itr = 1000000000 / 256 / sc->int_throttle_ceil;
2893 else
2894 itr = 0;
2895 emx_set_itr(sc, itr);
2896
2897 /* Use extended RX descriptor */
2898 rfctl = E1000_RFCTL_EXTEN;
2899
2900 /* Disable accelerated acknowledgement */
2901 if (sc->hw.mac.type == e1000_82574)
2902 rfctl |= E1000_RFCTL_ACK_DIS;
2903
2904 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);
2905
2906 /*
2907 * Receive Checksum Offload for TCP and UDP
2908 *
2909 * Checksum offloading is also enabled if multiple receive
2910 * queues are to be supported, since we need it to figure out
2911 * the packet type.
2912 */
2913 if ((ifp->if_capenable & IFCAP_RXCSUM) ||
2914 sc->rx_ring_cnt > 1) {
2915 uint32_t rxcsum;
2916
2917 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
2918
2919 /*
2920 * NOTE:
2921 * PCSD must be enabled to enable multiple
2922 * receive queues.
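*
* (Setting PCSD stops the hardware from computing the legacy packet
* checksum; it is assumed, per Intel's documentation, that the
* extended RX descriptor then carries the RSS hash instead, which
* emx_rxeof() reads as rxd_mrq/rxd_rss.)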
2923 */
2924 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2925 E1000_RXCSUM_PCSD;
2926 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
2927 }
2928
2929 /*
2930 * Configure multiple receive queues (RSS)
2931 */
2932 if (sc->rx_ring_cnt > 1) {
2933 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
2934 uint32_t reta;
2935
2936 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
2937 ("invalid number of RX ring (%d)", sc->rx_ring_cnt));
2938
2939 /*
2940 * NOTE:
2941 * When we reach here, RSS has already been disabled
2942 * in emx_stop(), so we can safely configure the RSS key
2943 * and redirect table.
2944 */
2945
2946 /*
2947 * Configure RSS key
2948 */
2949 toeplitz_get_key(key, sizeof(key));
2950 for (i = 0; i < EMX_NRSSRK; ++i) {
2951 uint32_t rssrk;
2952
2953 rssrk = EMX_RSSRK_VAL(key, i);
2954 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
2955
2956 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
2957 }
2958
2959 /*
2960 * Configure the RSS redirect table in the following fashion:
2961 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
2962 */
2963 reta = 0;
2964 for (i = 0; i < EMX_RETA_SIZE; ++i) {
2965 uint32_t q;
2966
2967 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT;
2968 reta |= q << (8 * i);
2969 }
2970 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
2971
2972 for (i = 0; i < EMX_NRETA; ++i)
2973 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta);
2974
2975 /*
2976 * Enable multiple receive queues.
2977 * Enable IPv4 RSS standard hash functions.
2978 * Disable RSS interrupt.
2979 */
2980 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
2981 E1000_MRQC_ENABLE_RSS_2Q |
2982 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2983 E1000_MRQC_RSS_FIELD_IPV4);
2984 }
2985
2986 /*
2987 * XXX TEMPORARY WORKAROUND: on some systems with 82573
2988 * long latencies are observed, like Lenovo X60. This
2989 * change eliminates the problem, but since having positive
2990 * values in RDTR is a known source of problems on other
2991 * platforms another solution is being sought.
2992 */
2993 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
2994 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
2995 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
2996 }
2997
2998 for (i = 0; i < sc->rx_ring_cnt; ++i) {
2999 struct emx_rxdata *rdata = &sc->rx_data[i];
3000
3001 /*
3002 * Setup the Base and Length of the Rx Descriptor Ring
3003 */
3004 bus_addr = rdata->rx_desc_paddr;
3005 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
3006 rdata->num_rx_desc * sizeof(emx_rxdesc_t));
3007 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
3008 (uint32_t)(bus_addr >> 32));
3009 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
3010 (uint32_t)bus_addr);
3011
3012 /*
3013 * Setup the HW Rx Head and Tail Descriptor Pointers
3014 */
3015 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
3016 E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
3017 sc->rx_data[i].num_rx_desc - 1);
3018 }
3019
3020 if (sc->hw.mac.type >= e1000_pch2lan) {
3021 if (ifp->if_mtu > ETHERMTU)
3022 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE);
3023 else
3024 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE);
3025 }
3026
3027 /* Setup the Receive Control Register */
3028 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3029 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3030 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
3031 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3032
3033 /* Make sure VLAN Filters are off */
3034 rctl &= ~E1000_RCTL_VFE;
3035
3036 /* Don't store bad packets */
3037 rctl &= ~E1000_RCTL_SBP;
3038
3039 /* MCLBYTES */
3040 rctl |= E1000_RCTL_SZ_2048;
3041
3042 if (ifp->if_mtu > ETHERMTU)
3043 rctl |= E1000_RCTL_LPE;
3044 else
3045 rctl &= ~E1000_RCTL_LPE;
3046
3047 /* Enable Receives */
3048 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
3049 }
3050
3051 static void
3052 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc)
3053 {
3054 struct emx_rxbuf *rx_buffer;
3055 int i;
3056
3057 /* Free Receive Descriptor ring */
3058 if (rdata->rx_desc) {
3059 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
3060 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
3061 rdata->rx_desc_dmap);
3062 bus_dma_tag_destroy(rdata->rx_desc_dtag);
3063
3064 rdata->rx_desc = NULL;
3065 }
3066
3067 if (rdata->rx_buf == NULL)
3068 return;
3069
3070 for (i = 0; i < ndesc; i++) {
3071 rx_buffer = &rdata->rx_buf[i];
3072
3073 KKASSERT(rx_buffer->m_head == NULL);
3074 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
3075 }
3076 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
3077 bus_dma_tag_destroy(rdata->rxtag);
3078
3079 kfree(rdata->rx_buf, M_DEVBUF);
3080 rdata->rx_buf = NULL;
3081 }
3082
3083 static void
3084 emx_rxeof(struct emx_rxdata *rdata, int count)
3085 {
3086 struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
3087 uint32_t staterr;
3088 emx_rxdesc_t *current_desc;
3089 struct mbuf *mp;
3090 int i, cpuid = mycpuid;
3091
3092 i = rdata->next_rx_desc_to_check;
3093 current_desc = &rdata->rx_desc[i];
3094 staterr = le32toh(current_desc->rxd_staterr);
3095
3096 if (!(staterr & E1000_RXD_STAT_DD))
3097 return;
3098
3099 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
3100 struct pktinfo *pi = NULL, pi0;
3101 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
3102 struct mbuf *m = NULL;
3103 int eop, len;
3104
3105 logif(pkt_receive);
3106
3107 mp = rx_buf->m_head;
3108
3109 /*
3110 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3111 * needs to access the last received byte in the mbuf.
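*
* (TBI_ACCEPT, a workaround inherited from em(4), may accept a frame
* whose only fault is a carrier-extension error flagged in its last
* byte, so that byte must be valid -- hence the immediate sync below.)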
3112 */ 3113 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 3114 BUS_DMASYNC_POSTREAD); 3115 3116 len = le16toh(current_desc->rxd_length); 3117 if (staterr & E1000_RXD_STAT_EOP) { 3118 count--; 3119 eop = 1; 3120 } else { 3121 eop = 0; 3122 } 3123 3124 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3125 uint16_t vlan = 0; 3126 uint32_t mrq, rss_hash; 3127 3128 /* 3129 * Save several necessary information, 3130 * before emx_newbuf() destroy it. 3131 */ 3132 if ((staterr & E1000_RXD_STAT_VP) && eop) 3133 vlan = le16toh(current_desc->rxd_vlan); 3134 3135 mrq = le32toh(current_desc->rxd_mrq); 3136 rss_hash = le32toh(current_desc->rxd_rss); 3137 3138 EMX_RSS_DPRINTF(rdata->sc, 10, 3139 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 3140 rdata->idx, mrq, rss_hash); 3141 3142 if (emx_newbuf(rdata, i, 0) != 0) { 3143 IFNET_STAT_INC(ifp, iqdrops, 1); 3144 goto discard; 3145 } 3146 3147 /* Assign correct length to the current fragment */ 3148 mp->m_len = len; 3149 3150 if (rdata->fmp == NULL) { 3151 mp->m_pkthdr.len = len; 3152 rdata->fmp = mp; /* Store the first mbuf */ 3153 rdata->lmp = mp; 3154 } else { 3155 /* 3156 * Chain mbuf's together 3157 */ 3158 rdata->lmp->m_next = mp; 3159 rdata->lmp = rdata->lmp->m_next; 3160 rdata->fmp->m_pkthdr.len += len; 3161 } 3162 3163 if (eop) { 3164 rdata->fmp->m_pkthdr.rcvif = ifp; 3165 IFNET_STAT_INC(ifp, ipackets, 1); 3166 3167 if (ifp->if_capenable & IFCAP_RXCSUM) 3168 emx_rxcsum(staterr, rdata->fmp); 3169 3170 if (staterr & E1000_RXD_STAT_VP) { 3171 rdata->fmp->m_pkthdr.ether_vlantag = 3172 vlan; 3173 rdata->fmp->m_flags |= M_VLANTAG; 3174 } 3175 m = rdata->fmp; 3176 rdata->fmp = NULL; 3177 rdata->lmp = NULL; 3178 3179 if (ifp->if_capenable & IFCAP_RSS) { 3180 pi = emx_rssinfo(m, &pi0, mrq, 3181 rss_hash, staterr); 3182 } 3183 #ifdef EMX_RSS_DEBUG 3184 rdata->rx_pkts++; 3185 #endif 3186 } 3187 } else { 3188 IFNET_STAT_INC(ifp, ierrors, 1); 3189 discard: 3190 emx_setup_rxdesc(current_desc, rx_buf); 3191 if (rdata->fmp != NULL) { 3192 m_freem(rdata->fmp); 3193 rdata->fmp = NULL; 3194 rdata->lmp = NULL; 3195 } 3196 m = NULL; 3197 } 3198 3199 if (m != NULL) 3200 ifp->if_input(ifp, m, pi, cpuid); 3201 3202 /* Advance our pointers to the next descriptor. */ 3203 if (++i == rdata->num_rx_desc) 3204 i = 0; 3205 3206 current_desc = &rdata->rx_desc[i]; 3207 staterr = le32toh(current_desc->rxd_staterr); 3208 } 3209 rdata->next_rx_desc_to_check = i; 3210 3211 /* Advance the E1000's Receive Queue "Tail Pointer". */ 3212 if (--i < 0) 3213 i = rdata->num_rx_desc - 1; 3214 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3215 } 3216 3217 static void 3218 emx_enable_intr(struct emx_softc *sc) 3219 { 3220 uint32_t ims_mask = IMS_ENABLE_MASK; 3221 3222 lwkt_serialize_handler_enable(&sc->main_serialize); 3223 3224 #if 0 3225 if (sc->hw.mac.type == e1000_82574) { 3226 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3227 ims_mask |= EM_MSIX_MASK; 3228 } 3229 #endif 3230 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3231 } 3232 3233 static void 3234 emx_disable_intr(struct emx_softc *sc) 3235 { 3236 if (sc->hw.mac.type == e1000_82574) 3237 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3238 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3239 3240 lwkt_serialize_handler_disable(&sc->main_serialize); 3241 } 3242 3243 /* 3244 * Bit of a misnomer, what this really means is 3245 * to enable OS management of the system... 
aka 3246 * to disable special hardware management features 3247 */ 3248 static void 3249 emx_get_mgmt(struct emx_softc *sc) 3250 { 3251 /* A shared code workaround */ 3252 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3253 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3254 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3255 3256 /* disable hardware interception of ARP */ 3257 manc &= ~(E1000_MANC_ARP_EN); 3258 3259 /* enable receiving management packets to the host */ 3260 manc |= E1000_MANC_EN_MNG2HOST; 3261 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3262 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3263 manc2h |= E1000_MNG2HOST_PORT_623; 3264 manc2h |= E1000_MNG2HOST_PORT_664; 3265 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3266 3267 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3268 } 3269 } 3270 3271 /* 3272 * Give control back to hardware management 3273 * controller if there is one. 3274 */ 3275 static void 3276 emx_rel_mgmt(struct emx_softc *sc) 3277 { 3278 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3279 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3280 3281 /* re-enable hardware interception of ARP */ 3282 manc |= E1000_MANC_ARP_EN; 3283 manc &= ~E1000_MANC_EN_MNG2HOST; 3284 3285 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3286 } 3287 } 3288 3289 /* 3290 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3291 * For ASF and Pass Through versions of f/w this means that 3292 * the driver is loaded. For AMT version (only with 82573) 3293 * of the f/w this means that the network i/f is open. 3294 */ 3295 static void 3296 emx_get_hw_control(struct emx_softc *sc) 3297 { 3298 /* Let firmware know the driver has taken over */ 3299 if (sc->hw.mac.type == e1000_82573) { 3300 uint32_t swsm; 3301 3302 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3303 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3304 swsm | E1000_SWSM_DRV_LOAD); 3305 } else { 3306 uint32_t ctrl_ext; 3307 3308 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3309 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3310 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3311 } 3312 sc->flags |= EMX_FLAG_HW_CTRL; 3313 } 3314 3315 /* 3316 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3317 * For ASF and Pass Through versions of f/w this means that the 3318 * driver is no longer loaded. For AMT version (only with 82573) 3319 * of the f/w this means that the network i/f is closed. 
3320 */
3321 static void
3322 emx_rel_hw_control(struct emx_softc *sc)
3323 {
3324 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
3325 return;
3326 sc->flags &= ~EMX_FLAG_HW_CTRL;
3327
3328 /* Let firmware take over control of h/w */
3329 if (sc->hw.mac.type == e1000_82573) {
3330 uint32_t swsm;
3331
3332 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3333 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3334 swsm & ~E1000_SWSM_DRV_LOAD);
3335 } else {
3336 uint32_t ctrl_ext;
3337
3338 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3339 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3340 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3341 }
3342 }
3343
3344 static int
3345 emx_is_valid_eaddr(const uint8_t *addr)
3346 {
3347 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3348
3349 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3350 return (FALSE);
3351
3352 return (TRUE);
3353 }
3354
3355 /*
3356 * Enable PCI Wake On LAN capability
3357 */
3358 void
3359 emx_enable_wol(device_t dev)
3360 {
3361 uint16_t cap, status;
3362 uint8_t id;
3363
3364 /* First find the capabilities pointer */
3365 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3366
3367 /* Read the PM Capabilities */
3368 id = pci_read_config(dev, cap, 1);
3369 if (id != PCIY_PMG) /* Something wrong */
3370 return;
3371
3372 /*
3373 * OK, we have the power capabilities,
3374 * so now get the status register
3375 */
3376 cap += PCIR_POWER_STATUS;
3377 status = pci_read_config(dev, cap, 2);
3378 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3379 pci_write_config(dev, cap, status, 2);
3380 }
3381
3382 static void
3383 emx_update_stats(struct emx_softc *sc)
3384 {
3385 struct ifnet *ifp = &sc->arpcom.ac_if;
3386
3387 if (sc->hw.phy.media_type == e1000_media_type_copper ||
3388 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3389 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
3390 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
3391 }
3392 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
3393 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
3394 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
3395 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
3396
3397 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
3398 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
3399 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
3400 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
3401 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
3402 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
3403 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
3404 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
3405 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
3406 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
3407 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
3408 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
3409 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
3410 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
3411 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
3412 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
3413 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
3414 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
3415 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
3416 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
3417
3418 /* For the 64-bit byte counters the low dword must be read first.
*/ 3419 /* Both registers clear on the read of the high dword */ 3420 3421 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3422 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3423 3424 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3425 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3426 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3427 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3428 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3429 3430 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3431 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3432 3433 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3434 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3435 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3436 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3437 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3438 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3439 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3440 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3441 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3442 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3443 3444 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3445 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3446 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3447 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3448 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3449 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3450 3451 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3452 3453 /* Rx Errors */ 3454 IFNET_STAT_SET(ifp, ierrors, 3455 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3456 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3457 3458 /* Tx Errors */ 3459 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3460 } 3461 3462 static void 3463 emx_print_debug_info(struct emx_softc *sc) 3464 { 3465 device_t dev = sc->dev; 3466 uint8_t *hw_addr = sc->hw.hw_addr; 3467 int i; 3468 3469 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3470 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3471 E1000_READ_REG(&sc->hw, E1000_CTRL), 3472 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3473 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3474 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3475 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3476 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3477 sc->hw.fc.high_water, sc->hw.fc.low_water); 3478 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3479 E1000_READ_REG(&sc->hw, E1000_TIDV), 3480 E1000_READ_REG(&sc->hw, E1000_TADV)); 3481 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3482 E1000_READ_REG(&sc->hw, E1000_RDTR), 3483 E1000_READ_REG(&sc->hw, E1000_RADV)); 3484 3485 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3486 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3487 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3488 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3489 } 3490 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3491 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3492 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3493 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3494 } 3495 3496 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3497 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3498 sc->tx_data[i].num_tx_desc_avail); 3499 
device_printf(dev, "TX %d TSO segments = %lu\n", i, 3500 sc->tx_data[i].tso_segments); 3501 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3502 sc->tx_data[i].tso_ctx_reused); 3503 } 3504 } 3505 3506 static void 3507 emx_print_hw_stats(struct emx_softc *sc) 3508 { 3509 device_t dev = sc->dev; 3510 3511 device_printf(dev, "Excessive collisions = %lld\n", 3512 (long long)sc->stats.ecol); 3513 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3514 device_printf(dev, "Symbol errors = %lld\n", 3515 (long long)sc->stats.symerrs); 3516 #endif 3517 device_printf(dev, "Sequence errors = %lld\n", 3518 (long long)sc->stats.sec); 3519 device_printf(dev, "Defer count = %lld\n", 3520 (long long)sc->stats.dc); 3521 device_printf(dev, "Missed Packets = %lld\n", 3522 (long long)sc->stats.mpc); 3523 device_printf(dev, "Receive No Buffers = %lld\n", 3524 (long long)sc->stats.rnbc); 3525 /* RLEC is inaccurate on some hardware, calculate our own. */ 3526 device_printf(dev, "Receive Length Errors = %lld\n", 3527 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3528 device_printf(dev, "Receive errors = %lld\n", 3529 (long long)sc->stats.rxerrc); 3530 device_printf(dev, "Crc errors = %lld\n", 3531 (long long)sc->stats.crcerrs); 3532 device_printf(dev, "Alignment errors = %lld\n", 3533 (long long)sc->stats.algnerrc); 3534 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3535 (long long)sc->stats.cexterr); 3536 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3537 device_printf(dev, "XON Rcvd = %lld\n", 3538 (long long)sc->stats.xonrxc); 3539 device_printf(dev, "XON Xmtd = %lld\n", 3540 (long long)sc->stats.xontxc); 3541 device_printf(dev, "XOFF Rcvd = %lld\n", 3542 (long long)sc->stats.xoffrxc); 3543 device_printf(dev, "XOFF Xmtd = %lld\n", 3544 (long long)sc->stats.xofftxc); 3545 device_printf(dev, "Good Packets Rcvd = %lld\n", 3546 (long long)sc->stats.gprc); 3547 device_printf(dev, "Good Packets Xmtd = %lld\n", 3548 (long long)sc->stats.gptc); 3549 } 3550 3551 static void 3552 emx_print_nvm_info(struct emx_softc *sc) 3553 { 3554 uint16_t eeprom_data; 3555 int i, j, row = 0; 3556 3557 /* Its a bit crude, but it gets the job done */ 3558 kprintf("\nInterface EEPROM Dump:\n"); 3559 kprintf("Offset\n0x0000 "); 3560 for (i = 0, j = 0; i < 32; i++, j++) { 3561 if (j == 8) { /* Make the offset block */ 3562 j = 0; ++row; 3563 kprintf("\n0x00%x0 ",row); 3564 } 3565 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3566 kprintf("%04x ", eeprom_data); 3567 } 3568 kprintf("\n"); 3569 } 3570 3571 static int 3572 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3573 { 3574 struct emx_softc *sc; 3575 struct ifnet *ifp; 3576 int error, result; 3577 3578 result = -1; 3579 error = sysctl_handle_int(oidp, &result, 0, req); 3580 if (error || !req->newptr) 3581 return (error); 3582 3583 sc = (struct emx_softc *)arg1; 3584 ifp = &sc->arpcom.ac_if; 3585 3586 ifnet_serialize_all(ifp); 3587 3588 if (result == 1) 3589 emx_print_debug_info(sc); 3590 3591 /* 3592 * This value will cause a hex dump of the 3593 * first 32 16-bit words of the EEPROM to 3594 * the screen. 
3595 */ 3596 if (result == 2) 3597 emx_print_nvm_info(sc); 3598 3599 ifnet_deserialize_all(ifp); 3600 3601 return (error); 3602 } 3603 3604 static int 3605 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3606 { 3607 int error, result; 3608 3609 result = -1; 3610 error = sysctl_handle_int(oidp, &result, 0, req); 3611 if (error || !req->newptr) 3612 return (error); 3613 3614 if (result == 1) { 3615 struct emx_softc *sc = (struct emx_softc *)arg1; 3616 struct ifnet *ifp = &sc->arpcom.ac_if; 3617 3618 ifnet_serialize_all(ifp); 3619 emx_print_hw_stats(sc); 3620 ifnet_deserialize_all(ifp); 3621 } 3622 return (error); 3623 } 3624 3625 static void 3626 emx_add_sysctl(struct emx_softc *sc) 3627 { 3628 struct sysctl_ctx_list *ctx; 3629 struct sysctl_oid *tree; 3630 #if defined(EMX_RSS_DEBUG) || defined(EMX_TSS_DEBUG) 3631 char pkt_desc[32]; 3632 int i; 3633 #endif 3634 3635 ctx = device_get_sysctl_ctx(sc->dev); 3636 tree = device_get_sysctl_tree(sc->dev); 3637 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3638 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3639 emx_sysctl_debug_info, "I", "Debug Information"); 3640 3641 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3642 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3643 emx_sysctl_stats, "I", "Statistics"); 3644 3645 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3646 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0, 3647 "# of RX descs"); 3648 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3649 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0, 3650 "# of TX descs"); 3651 3652 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3653 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3654 emx_sysctl_int_throttle, "I", "interrupt throttling rate"); 3655 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3656 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3657 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt"); 3658 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3659 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3660 emx_sysctl_tx_wreg_nsegs, "I", 3661 "# segments sent before write to hardware register"); 3662 3663 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3664 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0, 3665 "# of RX rings"); 3666 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3667 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0, 3668 "# of TX rings"); 3669 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3670 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0, 3671 "# of TX rings used"); 3672 3673 #ifdef IFPOLL_ENABLE 3674 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3675 OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, 3676 sc, 0, emx_sysctl_npoll_rxoff, "I", 3677 "NPOLLING RX cpu offset"); 3678 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3679 OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, 3680 sc, 0, emx_sysctl_npoll_txoff, "I", 3681 "NPOLLING TX cpu offset"); 3682 #endif 3683 3684 #ifdef EMX_RSS_DEBUG 3685 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3686 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3687 0, "RSS debug level"); 3688 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3689 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i); 3690 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3691 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts, 3692 "RXed packets"); 3693 } 3694 #endif 3695 #ifdef EMX_TSS_DEBUG 3696 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3697 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i); 3698 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3699 pkt_desc, 
CTLFLAG_RW, &sc->tx_data[i].tx_pkts,
3700 "TXed packets");
3701 }
3702 #endif
3703 }
3704
3705 static int
3706 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3707 {
3708 struct emx_softc *sc = (void *)arg1;
3709 struct ifnet *ifp = &sc->arpcom.ac_if;
3710 int error, throttle;
3711
3712 throttle = sc->int_throttle_ceil;
3713 error = sysctl_handle_int(oidp, &throttle, 0, req);
3714 if (error || req->newptr == NULL)
3715 return error;
3716 if (throttle < 0 || throttle > 1000000000 / 256)
3717 return EINVAL;
3718
3719 if (throttle) {
3720 /*
3721 * Set the interrupt throttling rate in 256ns increments;
3722 * recalculate the sysctl value to get the exact frequency.
3723 */
3724 throttle = 1000000000 / 256 / throttle;
3725
3726 /* Upper 16 bits of ITR are reserved and should be zero */
3727 if (throttle & 0xffff0000)
3728 return EINVAL;
3729 }
3730
3731 ifnet_serialize_all(ifp);
3732
3733 if (throttle)
3734 sc->int_throttle_ceil = 1000000000 / 256 / throttle;
3735 else
3736 sc->int_throttle_ceil = 0;
3737
3738 if (ifp->if_flags & IFF_RUNNING)
3739 emx_set_itr(sc, throttle);
3740
3741 ifnet_deserialize_all(ifp);
3742
3743 if (bootverbose) {
3744 if_printf(ifp, "Interrupt moderation set to %d/sec\n",
3745 sc->int_throttle_ceil);
3746 }
3747 return 0;
3748 }
3749
3750 static int
3751 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
3752 {
3753 struct emx_softc *sc = (void *)arg1;
3754 struct ifnet *ifp = &sc->arpcom.ac_if;
3755 struct emx_txdata *tdata = &sc->tx_data[0];
3756 int error, segs;
3757
3758 segs = tdata->tx_intr_nsegs;
3759 error = sysctl_handle_int(oidp, &segs, 0, req);
3760 if (error || req->newptr == NULL)
3761 return error;
3762 if (segs <= 0)
3763 return EINVAL;
3764
3765 ifnet_serialize_all(ifp);
3766
3767 /*
3768 * Don't allow tx_intr_nsegs to become:
3769 * o Less than oact_tx_desc
3770 * o So large that no TX desc will cause a TX interrupt to
3771 * be generated (OACTIVE will never recover)
3772 * o So small that it will cause tx_dd[] to overflow
3773 */
3774 if (segs < tdata->oact_tx_desc ||
3775 segs >= tdata->num_tx_desc - tdata->oact_tx_desc ||
3776 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) {
3777 error = EINVAL;
3778 } else {
3779 int i;
3780
3781 error = 0;
3782 for (i = 0; i < sc->tx_ring_cnt; ++i)
3783 sc->tx_data[i].tx_intr_nsegs = segs;
3784 }
3785
3786 ifnet_deserialize_all(ifp);
3787
3788 return error;
3789 }
3790
3791 static int
3792 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
3793 {
3794 struct emx_softc *sc = (void *)arg1;
3795 struct ifnet *ifp = &sc->arpcom.ac_if;
3796 int error, nsegs, i;
3797
3798 nsegs = sc->tx_data[0].tx_wreg_nsegs;
3799 error = sysctl_handle_int(oidp, &nsegs, 0, req);
3800 if (error || req->newptr == NULL)
3801 return error;
3802
3803 ifnet_serialize_all(ifp);
3804 for (i = 0; i < sc->tx_ring_cnt; ++i)
3805 sc->tx_data[i].tx_wreg_nsegs = nsegs;
3806 ifnet_deserialize_all(ifp);
3807
3808 return 0;
3809 }
3810
3811 #ifdef IFPOLL_ENABLE
3812
3813 static int
3814 emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3815 {
3816 struct emx_softc *sc = (void *)arg1;
3817 struct ifnet *ifp = &sc->arpcom.ac_if;
3818 int error, off;
3819
3820 off = sc->rx_npoll_off;
3821 error = sysctl_handle_int(oidp, &off, 0, req);
3822 if (error || req->newptr == NULL)
3823 return error;
3824 if (off < 0)
3825 return EINVAL;
3826
3827 ifnet_serialize_all(ifp);
3828 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) {
3829 error = EINVAL;
3830 } else {
3831 error = 0;
3832 sc->rx_npoll_off = off;
3833 }
3834 ifnet_deserialize_all(ifp);
3835
3836 return error;
3837 }
3838
3839 static
int 3840 emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS) 3841 { 3842 struct emx_softc *sc = (void *)arg1; 3843 struct ifnet *ifp = &sc->arpcom.ac_if; 3844 int error, off; 3845 3846 off = sc->tx_npoll_off; 3847 error = sysctl_handle_int(oidp, &off, 0, req); 3848 if (error || req->newptr == NULL) 3849 return error; 3850 if (off < 0) 3851 return EINVAL; 3852 3853 ifnet_serialize_all(ifp); 3854 if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) { 3855 error = EINVAL; 3856 } else { 3857 error = 0; 3858 sc->tx_npoll_off = off; 3859 } 3860 ifnet_deserialize_all(ifp); 3861 3862 return error; 3863 } 3864 3865 #endif /* IFPOLL_ENABLE */ 3866 3867 static int 3868 emx_dma_alloc(struct emx_softc *sc) 3869 { 3870 int error, i; 3871 3872 /* 3873 * Create top level busdma tag 3874 */ 3875 error = bus_dma_tag_create(NULL, 1, 0, 3876 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3877 NULL, NULL, 3878 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 3879 0, &sc->parent_dtag); 3880 if (error) { 3881 device_printf(sc->dev, "could not create top level DMA tag\n"); 3882 return error; 3883 } 3884 3885 /* 3886 * Allocate transmit descriptors ring and buffers 3887 */ 3888 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3889 error = emx_create_tx_ring(&sc->tx_data[i]); 3890 if (error) { 3891 device_printf(sc->dev, 3892 "Could not setup transmit structures\n"); 3893 return error; 3894 } 3895 } 3896 3897 /* 3898 * Allocate receive descriptors ring and buffers 3899 */ 3900 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3901 error = emx_create_rx_ring(&sc->rx_data[i]); 3902 if (error) { 3903 device_printf(sc->dev, 3904 "Could not setup receive structures\n"); 3905 return error; 3906 } 3907 } 3908 return 0; 3909 } 3910 3911 static void 3912 emx_dma_free(struct emx_softc *sc) 3913 { 3914 int i; 3915 3916 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3917 emx_destroy_tx_ring(&sc->tx_data[i], 3918 sc->tx_data[i].num_tx_desc); 3919 } 3920 3921 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3922 emx_destroy_rx_ring(&sc->rx_data[i], 3923 sc->rx_data[i].num_rx_desc); 3924 } 3925 3926 /* Free top level busdma tag */ 3927 if (sc->parent_dtag != NULL) 3928 bus_dma_tag_destroy(sc->parent_dtag); 3929 } 3930 3931 static void 3932 emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3933 { 3934 struct emx_softc *sc = ifp->if_softc; 3935 3936 ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz); 3937 } 3938 3939 static void 3940 emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3941 { 3942 struct emx_softc *sc = ifp->if_softc; 3943 3944 ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz); 3945 } 3946 3947 static int 3948 emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3949 { 3950 struct emx_softc *sc = ifp->if_softc; 3951 3952 return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz); 3953 } 3954 3955 static void 3956 emx_serialize_skipmain(struct emx_softc *sc) 3957 { 3958 lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1); 3959 } 3960 3961 static void 3962 emx_deserialize_skipmain(struct emx_softc *sc) 3963 { 3964 lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1); 3965 } 3966 3967 #ifdef INVARIANTS 3968 3969 static void 3970 emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3971 boolean_t serialized) 3972 { 3973 struct emx_softc *sc = ifp->if_softc; 3974 3975 ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE, 3976 slz, serialized); 3977 } 3978 3979 #endif /* INVARIANTS */ 3980 3981 #ifdef IFPOLL_ENABLE 3982 3983 static void 3984 emx_npoll_status(struct ifnet 
#ifdef IFPOLL_ENABLE

static void
emx_npoll_status(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);
		callout_reset(&sc->timer, hz, emx_timer, sc);
	}
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct emx_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->tx_serialize);

	emx_txeof(tdata);
	if (!ifsq_is_empty(tdata->ifsq))
		ifsq_devstart(tdata->ifsq);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct emx_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->rx_serialize);

	emx_rxeof(rdata, cycle);
}

static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct emx_softc *sc = ifp->if_softc;
	int i, txr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int off;

		info->ifpi_status.status_func = emx_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = emx_get_txring_inuse(sc, TRUE);
		off = sc->tx_npoll_off;
		for (i = 0; i < txr_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];
			int idx = i + off;

			KKASSERT(idx < ncpus2);
			info->ifpi_tx[idx].poll_func = emx_npoll_tx;
			info->ifpi_tx[idx].arg = tdata;
			info->ifpi_tx[idx].serializer = &tdata->tx_serialize;
			ifsq_set_cpuid(tdata->ifsq, idx);
		}

		off = sc->rx_npoll_off;
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct emx_rxdata *rdata = &sc->rx_data[i];
			int idx = i + off;

			KKASSERT(idx < ncpus2);
			info->ifpi_rx[idx].poll_func = emx_npoll_rx;
			info->ifpi_rx[idx].arg = rdata;
			info->ifpi_rx[idx].serializer = &rdata->rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING) {
			if (txr_cnt == sc->tx_ring_inuse)
				emx_disable_intr(sc);
			else
				emx_init(sc);
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			ifsq_set_cpuid(tdata->ifsq,
			    rman_get_cpuid(sc->intr_res));
		}

		if (ifp->if_flags & IFF_RUNNING) {
			txr_cnt = emx_get_txring_inuse(sc, FALSE);
			if (txr_cnt == sc->tx_ring_inuse)
				emx_enable_intr(sc);
			else
				emx_init(sc);
		}
	}
}

#endif	/* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
	E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
	if (sc->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When using MSI-X interrupts we need to
		 * throttle using the EITR register
		 */
		for (i = 0; i < 4; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
	}
}
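/*
 * A note on emx_set_itr() above: ITR holds the minimum inter-interrupt
 * interval, which these MACs count in 256ns units (an assumption based
 * on the 8257x datasheets), so a target rate of R interrupts/sec maps
 * roughly to itr = 1000000000 / (R * 256).  A hypothetical sketch:
 *
 *	// ~8000 interrupts/sec, assuming the 256ns ITR granularity
 *	emx_set_itr(sc, 1000000000 / (8000 * 256));
 *
 * The 82574 branch mirrors the value into its four EITR registers since
 * each MSI-X vector is throttled separately on that MAC.
 */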
/*
 * Disable ASPM L0s (and, where the errata require it, L1);
 * see e.g. 82574L errata #20.
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = sc->dev;

	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8 disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
		/*
		 * 82574 specification update errata #20
		 *
		 * There is no need to disable L1
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}

static int
emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
{
	int iphlen, hoff, thoff, ex = 0;
	struct mbuf *m;
	struct ip *ip;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
		ex = 4;

	if (m->m_len < hoff + iphlen + thoff + ex) {
		m = m_pullup(m, hoff + iphlen + thoff + ex);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	ip->ip_len = 0;

	return 0;
}
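/*
 * A note on emx_tso_pullup() above: m_pullup() may substitute a new
 * mbuf for the head of the chain (or free the chain and return NULL),
 * which is why the result is written back through *mp.  A hypothetical
 * caller sketch, roughly how a transmit path would use it:
 *
 *	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
 *		error = emx_tso_pullup(tdata, &m_head);
 *		if (error) {
 *			// m_head is already NULL on failure
 *			return error;
 *		}
 *	}
 *
 * When EMX_TXFLAG_TSO_PULLEX is set, 4 extra bytes beyond the TCP
 * header are pulled in as well, presumably because some MACs need a
 * few payload bytes resident in the first mbuf (an assumption here).
 */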
static int
emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int hoff, iphlen, thoff, hlen;
	int mss, pktlen, curr_txd;

#ifdef EMX_TSO_DEBUG
	tdata->tso_segments++;
#endif

	iphlen = mp->m_pkthdr.csum_iphlen;
	thoff = mp->m_pkthdr.csum_thlen;
	hoff = mp->m_pkthdr.csum_lhlen;
	mss = mp->m_pkthdr.tso_segsz;
	pktlen = mp->m_pkthdr.len;

	if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
	    tdata->csum_flags == CSUM_TSO &&
	    tdata->csum_iphlen == iphlen &&
	    tdata->csum_lhlen == hoff &&
	    tdata->csum_thlen == thoff &&
	    tdata->csum_mss == mss &&
	    tdata->csum_pktlen == pktlen) {
		*txd_upper = tdata->csum_txd_upper;
		*txd_lower = tdata->csum_txd_lower;
#ifdef EMX_TSO_DEBUG
		tdata->tso_ctx_reused++;
#endif
		return 0;
	}
	hlen = hoff + iphlen + thoff;

	/*
	 * Setup a new TSO context.
	 */

	curr_txd = tdata->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
	    E1000_TXD_DTYP_D |			/* Data descr type */
	    E1000_TXD_CMD_TSE;			/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

	/*
	 * Start offset for header checksum calculation.
	 * End offset for header checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->lower_setup.ip_fields.ipcss = hoff;
	TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
	TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);

	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    hoff + iphlen + offsetof(struct tcphdr, th_sum);

	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mss);
	TXD->tcp_seg_setup.fields.hdr_len = hlen;
	TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
	    E1000_TXD_CMD_DEXT |	/* Extended descr */
	    E1000_TXD_CMD_TSE |		/* TSE context */
	    E1000_TXD_CMD_IP |		/* Do IP csum */
	    E1000_TXD_CMD_TCP |		/* Do TCP checksum */
	    (pktlen - hlen));		/* Total len */

	/* Save the information for this TSO context */
	tdata->csum_flags = CSUM_TSO;
	tdata->csum_lhlen = hoff;
	tdata->csum_iphlen = iphlen;
	tdata->csum_thlen = thoff;
	tdata->csum_mss = mss;
	tdata->csum_pktlen = pktlen;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}

static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
	if (polling)
		return sc->tx_ring_cnt;
	else
		return 1;
}
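/*
 * A note on the two functions above: emx_tso_setup() returns 1 when it
 * programmed (and thus consumed) a context descriptor, and 0 when the
 * context cached from the previous TSO packet could be reused, so the
 * caller can account for descriptor usage accordingly.
 * emx_get_txring_inuse() reports all TX rings as usable under polling
 * but only one in interrupt mode, where the single device interrupt is
 * serviced on one CPU.  A hypothetical sketch of a mode switch
 * consulting it:
 *
 *	txr_cnt = emx_get_txring_inuse(sc, polling);
 *	if (txr_cnt != sc->tx_ring_inuse)
 *		emx_init(sc);	// reprogram queues for the new ring count
 */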