/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),
	EMX_DEVICE(PCH_I218_LM2),
	EMX_DEVICE(PCH_I218_V2),
	EMX_DEVICE(PCH_I218_LM3),
	EMX_DEVICE(PCH_I218_V3),
	EMX_DEVICE(PCH_SPT_I219_LM),
	EMX_DEVICE(PCH_SPT_I219_V),
	EMX_DEVICE(PCH_SPT_I219_LM2),
	EMX_DEVICE(PCH_SPT_I219_V2),

	/* required last entry */
	EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, emx_probe),
	DEVMETHOD(device_attach, emx_attach),
	DEVMETHOD(device_detach, emx_detach),
	DEVMETHOD(device_shutdown, emx_shutdown),
	DEVMETHOD(device_suspend, emx_suspend),
	DEVMETHOD(device_resume, emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous mode also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

static char	emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_RXPAUSE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* The Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable;
	int tx_ring_max, ring_cnt;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
	char flowctrl[IFM_ETH_FC_STRLEN];

	/*
	 * Setup RX rings
	 */
	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	/*
	 * Setup TX rings
	 */
	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;
again:
	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: %s\n",
		    sc->intr_type == PCI_INTR_TYPE_MSI ? "MSI" : "legacy intr");
		if (!msi_enable) {
			/* Retry with MSI. */
			msi_enable = 1;
			sc->flags &= ~EMX_FLAG_SHARED_INTR;
			goto again;
		}
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory, and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/*
		 * Recalculate the tunable value to get the exact
		 * frequency.
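		 *
		 * The ITR register counts in units of 256ns, hence the
		 * 1000000000 / 256 / rate conversion below; e.g. a
		 * requested ceiling of 10000 interrupts/s programs
		 * 1000000000 / 256 / 10000 = 390 into ITR.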
		 */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}

	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/*
	 * Calculate # of RX/TX rings
	 */
	ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, EMX_NRX_RING);

	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan ||
	    sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_pch_spt ||
	    sc->hw.mac.type == e1000_82574)
		tx_ring_max = EMX_NTX_RING;
	ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, tx_ring_max);

	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);
	sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state; this is important for reading
	 * the NVM and MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings; now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port adapter, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

	/* Initialize the # of TX rings to use. */
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    emx_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree; must be after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset; call it again,
		 * if it fails a second time it's a real issue.
813 */ 814 error = emx_reset(sc); 815 if (error) { 816 device_printf(dev, "Unable to reset the hardware\n"); 817 ether_ifdetach(&sc->arpcom.ac_if); 818 goto fail; 819 } 820 } 821 822 /* Initialize statistics */ 823 emx_update_stats(sc); 824 825 sc->hw.mac.get_link_status = 1; 826 emx_update_link_status(sc); 827 828 /* Non-AMT based hardware can now take control from firmware */ 829 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) == 830 EMX_FLAG_HAS_MGMT) 831 emx_get_hw_control(sc); 832 833 /* 834 * Missing Interrupt Following ICR read: 835 * 836 * 82571/82572 specification update errata #76 837 * 82573 specification update errata #31 838 * 82574 specification update errata #12 839 */ 840 intr_func = emx_intr; 841 if ((sc->flags & EMX_FLAG_SHARED_INTR) && 842 (sc->hw.mac.type == e1000_82571 || 843 sc->hw.mac.type == e1000_82572 || 844 sc->hw.mac.type == e1000_82573 || 845 sc->hw.mac.type == e1000_82574)) 846 intr_func = emx_intr_mask; 847 848 error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc, 849 &sc->intr_tag, &sc->main_serialize); 850 if (error) { 851 device_printf(dev, "Failed to register interrupt handler"); 852 ether_ifdetach(&sc->arpcom.ac_if); 853 goto fail; 854 } 855 return (0); 856 fail: 857 emx_detach(dev); 858 return (error); 859 } 860 861 static int 862 emx_detach(device_t dev) 863 { 864 struct emx_softc *sc = device_get_softc(dev); 865 866 if (device_is_attached(dev)) { 867 struct ifnet *ifp = &sc->arpcom.ac_if; 868 869 ifnet_serialize_all(ifp); 870 871 emx_stop(sc); 872 873 e1000_phy_hw_reset(&sc->hw); 874 875 emx_rel_mgmt(sc); 876 emx_rel_hw_control(sc); 877 878 if (sc->wol) { 879 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 880 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 881 emx_enable_wol(dev); 882 } 883 884 bus_teardown_intr(dev, sc->intr_res, sc->intr_tag); 885 886 ifnet_deserialize_all(ifp); 887 888 ether_ifdetach(ifp); 889 } else if (sc->memory != NULL) { 890 emx_rel_hw_control(sc); 891 } 892 893 ifmedia_removeall(&sc->media); 894 bus_generic_detach(dev); 895 896 if (sc->intr_res != NULL) { 897 bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, 898 sc->intr_res); 899 } 900 901 if (sc->intr_type == PCI_INTR_TYPE_MSI) 902 pci_release_msi(dev); 903 904 if (sc->memory != NULL) { 905 bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid, 906 sc->memory); 907 } 908 909 if (sc->flash != NULL) { 910 bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid, 911 sc->flash); 912 } 913 914 emx_dma_free(sc); 915 916 if (sc->mta != NULL) 917 kfree(sc->mta, M_DEVBUF); 918 919 if (sc->rx_rmap != NULL) 920 if_ringmap_free(sc->rx_rmap); 921 if (sc->tx_rmap != NULL) 922 if_ringmap_free(sc->tx_rmap); 923 924 return (0); 925 } 926 927 static int 928 emx_shutdown(device_t dev) 929 { 930 return emx_suspend(dev); 931 } 932 933 static int 934 emx_suspend(device_t dev) 935 { 936 struct emx_softc *sc = device_get_softc(dev); 937 struct ifnet *ifp = &sc->arpcom.ac_if; 938 939 ifnet_serialize_all(ifp); 940 941 emx_stop(sc); 942 943 emx_rel_mgmt(sc); 944 emx_rel_hw_control(sc); 945 946 if (sc->wol) { 947 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 948 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 949 emx_enable_wol(dev); 950 } 951 952 ifnet_deserialize_all(ifp); 953 954 return bus_generic_suspend(dev); 955 } 956 957 static int 958 emx_resume(device_t dev) 959 { 960 struct emx_softc *sc = device_get_softc(dev); 961 struct ifnet *ifp = &sc->arpcom.ac_if; 962 int i; 963 964 ifnet_serialize_all(ifp); 965 966 emx_init(sc); 967 emx_get_mgmt(sc); 968 
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Do we at least have a minimal number of free descriptors? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time will make the opackets
		 * statistics vastly inaccurate; we do the opackets increment
		 * now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
}

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
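			 * In that case we fall back to the standard
			 * 1518-byte (ETHER_MAX_LEN) frame limit.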
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
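	 *
	 * Reaching this routine therefore means that no TX descriptor
	 * was cleaned for EMX_TX_TIMEOUT consecutive seconds.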
	 */

	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		tdata->tx_watchdog.wd_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest mac address; the user can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571, RAR[0] may be overwritten when the other
	 * port is reset, so we make a duplicate in RAR[14] for that
	 * eventuality; this assures the interface continues to
	 * function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/*
		 * XXX emx_stop()?
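		 * The interface is left half-initialized when the
		 * reset fails; stopping the hardware here might be
		 * the safer choice.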
		 */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_data[i].ifsq);
		ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog);
	}

	callout_reset(&sc->timer, hz, emx_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}

	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (polling)
		emx_disable_intr(sc);
	else
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);
}

static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle.  The ICR register
	 * reports an all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
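	 *
	 * An all-ones ICR usually indicates that register reads are
	 * failing, presumably because the device has not finished
	 * waking up yet.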
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(&sc->rx_data[i], -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			struct emx_txdata *tdata = &sc->tx_data[0];

			lwkt_serialize_enter(&tdata->tx_serialize);
			emx_txeof(tdata);
			if (!ifsq_is_empty(tdata->ifsq))
				ifsq_devstart(tdata->ifsq);
			lwkt_serialize_exit(&tdata->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	if (ifmr->ifm_active & IFM_FDX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}

static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);

	return (0);
}

static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = emx_tso_pullup(tdata, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index; this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = tdata->next_avail_tx_desc;
	tx_buffer = &tdata->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	tdata->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/*
		 * Set the VLAN id.
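		 * The 802.1Q tag lands in the upper 16 bits of the
		 * descriptor's "upper" word; the VLE command bit set
		 * below tells the MAC to insert the tag on transmit.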
		 */
		txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

	i = tdata->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &tdata->tx_buf[i];
		ctxd = &tdata->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == tdata->num_tx_desc)
			i = 0;
	}

	tdata->next_avail_tx_desc = i;

	KKASSERT(tdata->num_tx_desc_avail > nsegs);
	tdata->num_tx_desc_avail -= nsegs;

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
		tdata->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_intr_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		tdata->tx_dd[tdata->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
		KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Defer TDT updating, until enough descriptors are setup
	 */
	*idx = i;

#ifdef EMX_TSS_DEBUG
	tdata->tx_pkts++;
#endif

	return (0);
}

static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	reg_rctl &= ~E1000_RCTL_UPE;
	reg_rctl &= ~E1000_RCTL_MPE;
	reg_rctl &= ~E1000_RCTL_SBP;
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

/*
 * This routine checks for link status and
 * updates statistics.
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(&sc->main_serialize);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571EB/82572EI
		 */
		if (sc->link_speed != SPEED_1000 &&
		    (hw->mac.type == e1000_82571 ||
		     hw->mac.type == e1000_82572)) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));
			device_printf(dev, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    (sc->link_duplex == FULL_DUPLEX) ?
1854 "Full Duplex" : "Half Duplex", 1855 flowctrl); 1856 } 1857 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1858 e1000_force_flowctrl(hw, sc->ifm_flowctrl); 1859 sc->link_active = 1; 1860 sc->smartspeed = 0; 1861 ifp->if_baudrate = sc->link_speed * 1000000; 1862 ifp->if_link_state = LINK_STATE_UP; 1863 if_link_state_change(ifp); 1864 } else if (!link_check && sc->link_active == 1) { 1865 ifp->if_baudrate = sc->link_speed = 0; 1866 sc->link_duplex = 0; 1867 if (bootverbose) 1868 device_printf(dev, "Link is Down\n"); 1869 sc->link_active = 0; 1870 ifp->if_link_state = LINK_STATE_DOWN; 1871 if_link_state_change(ifp); 1872 } 1873 } 1874 1875 static void 1876 emx_stop(struct emx_softc *sc) 1877 { 1878 struct ifnet *ifp = &sc->arpcom.ac_if; 1879 int i; 1880 1881 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1882 1883 emx_disable_intr(sc); 1884 1885 callout_stop(&sc->timer); 1886 1887 ifp->if_flags &= ~IFF_RUNNING; 1888 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1889 struct emx_txdata *tdata = &sc->tx_data[i]; 1890 1891 ifsq_clr_oactive(tdata->ifsq); 1892 ifsq_watchdog_stop(&tdata->tx_watchdog); 1893 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED; 1894 } 1895 1896 /* 1897 * Disable multiple receive queues. 1898 * 1899 * NOTE: 1900 * We should disable multiple receive queues before 1901 * resetting the hardware. 1902 */ 1903 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0); 1904 1905 e1000_reset_hw(&sc->hw); 1906 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1907 1908 for (i = 0; i < sc->tx_ring_cnt; ++i) 1909 emx_free_tx_ring(&sc->tx_data[i]); 1910 for (i = 0; i < sc->rx_ring_cnt; ++i) 1911 emx_free_rx_ring(&sc->rx_data[i]); 1912 } 1913 1914 static int 1915 emx_reset(struct emx_softc *sc) 1916 { 1917 device_t dev = sc->dev; 1918 uint16_t rx_buffer_size; 1919 uint32_t pba; 1920 1921 /* Set up smart power down as default off on newer adapters. */ 1922 if (!emx_smart_pwr_down && 1923 (sc->hw.mac.type == e1000_82571 || 1924 sc->hw.mac.type == e1000_82572)) { 1925 uint16_t phy_tmp = 0; 1926 1927 /* Speed up time to link by disabling smart power down. */ 1928 e1000_read_phy_reg(&sc->hw, 1929 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 1930 phy_tmp &= ~IGP02E1000_PM_SPD; 1931 e1000_write_phy_reg(&sc->hw, 1932 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 1933 } 1934 1935 /* 1936 * Packet Buffer Allocation (PBA) 1937 * Writing PBA sets the receive portion of the buffer 1938 * the remainder is used for the transmit buffer. 1939 */ 1940 switch (sc->hw.mac.type) { 1941 /* Total Packet Buffer on these is 48K */ 1942 case e1000_82571: 1943 case e1000_82572: 1944 case e1000_80003es2lan: 1945 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 1946 break; 1947 1948 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 1949 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 1950 break; 1951 1952 case e1000_82574: 1953 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 1954 break; 1955 1956 case e1000_pch_lpt: 1957 case e1000_pch_spt: 1958 pba = E1000_PBA_26K; 1959 break; 1960 1961 default: 1962 /* Devices before 82547 had a Packet Buffer of 64K. */ 1963 if (sc->hw.mac.max_frame_size > 8192) 1964 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1965 else 1966 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1967 } 1968 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba); 1969 1970 /* 1971 * These parameters control the automatic generation (Tx) and 1972 * response (Rx) to Ethernet PAUSE frames. 1973 * - High water mark should allow for at least two frames to be 1974 * received after sending an XOFF. 1975 * - Low water mark works best when it is very near the high water mark. 
	rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;

	sc->hw.fc.high_water = rx_buffer_size -
	    roundup2(sc->hw.mac.max_frame_size, 1024);
	sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;

	sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);

	/*
	 * Device specific overrides/settings
	 */
	if (sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_pch_spt) {
		sc->hw.fc.high_water = 0x5C20;
		sc->hw.fc.low_water = 0x5048;
		sc->hw.fc.pause_time = 0x0650;
		sc->hw.fc.refresh_time = 0x0400;
		/* Jumbos need adjusted PBA */
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU)
			E1000_WRITE_REG(&sc->hw, E1000_PBA, 12);
		else
			E1000_WRITE_REG(&sc->hw, E1000_PBA, 26);
	} else if (sc->hw.mac.type == e1000_80003es2lan) {
		sc->hw.fc.pause_time = 0xFFFF;
	}

	/* Issue a global reset */
	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
	emx_disable_aspm(sc);

	if (e1000_init_hw(&sc->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(&sc->hw);
	e1000_check_for_link(&sc->hw);

	return (0);
}

static void
emx_setup_ifp(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	if_initname(ifp, device_get_name(sc->dev),
	    device_get_unit(sc->dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = emx_init;
	ifp->if_ioctl = emx_ioctl;
	ifp->if_start = emx_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = emx_npoll;
#endif
	ifp->if_serialize = emx_serialize;
	ifp->if_deserialize = emx_deserialize;
	ifp->if_tryserialize = emx_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = emx_serialize_assert;
#endif

	ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_modulo;
	ifq_set_subq_divisor(&ifp->if_snd, 1);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU |
	    IFCAP_TSO;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res));
		ifsq_set_priv(ifsq, tdata);
		ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize);
		tdata->ifsq = ifsq;

		ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog);
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
}

/*
 * Workaround for SmartSpeed on 82541 and 82547 controllers
 */
static void
emx_smartspeed(struct emx_softc *sc)
{
	uint16_t phy_tmp;

	if (sc->link_active || sc->hw.phy.type != e1000_phy_igp ||
	    sc->hw.mac.autoneg == 0 ||
	    (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (sc->smartspeed == 0) {
		/*
		 * If the Master/Slave config fault is asserted twice,
		 * we assume back-to-back faults.
		 */
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&sc->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if (phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&sc->hw,
				    PHY_1000T_CTRL, phy_tmp);
				sc->smartspeed++;
				if (sc->hw.mac.autoneg &&
				    !e1000_phy_setup_autoneg(&sc->hw) &&
				    !e1000_read_phy_reg(&sc->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= MII_CR_AUTO_NEG_EN |
					    MII_CR_RESTART_AUTO_NEG;
					e1000_write_phy_reg(&sc->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
		if (sc->hw.mac.autoneg &&
		    !e1000_phy_setup_autoneg(&sc->hw) &&
		    !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= MII_CR_AUTO_NEG_EN |
			    MII_CR_RESTART_AUTO_NEG;
			e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp);
		}
	}

	/* Restart process after EMX_SMARTSPEED_MAX iterations */
	if (sc->smartspeed++ == EMX_SMARTSPEED_MAX)
		sc->smartspeed = 0;
}

static int
emx_create_tx_ring(struct emx_txdata *tdata)
{
	device_t dev = tdata->sc->dev;
	struct emx_txbuf *tx_buffer;
	int error, i, tsize, ntxd;

	/*
	 * Validate the number of transmit descriptors.  It must not
	 * exceed the hardware maximum, and the resulting ring size
	 * must be a multiple of EMX_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(dev, "txd", emx_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 ||
	    ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EMX_DEFAULT_TXD, ntxd);
		tdata->num_tx_desc = EMX_DEFAULT_TXD;
	} else {
		tdata->num_tx_desc = ntxd;
	}
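	/*
	 * Illustrative arithmetic (the concrete values are assumptions
	 * from typical e1000 hardware, not taken from this driver):
	 * legacy TX descriptors are 16 bytes, so with an EMX_DBA_ALIGN
	 * of 128 the check above requires ntxd to be a multiple of 8;
	 * e.g. a "txd" tunable of 500 would be rejected in favor of
	 * EMX_DEFAULT_TXD, while 512 would be accepted (range limits
	 * permitting).
	 */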
	/*
	 * Allocate Transmit Descriptor ring
	 */
	tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EMX_DBA_ALIGN);
	tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag,
	    EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &tdata->tx_desc_dtag, &tdata->tx_desc_dmap,
	    &tdata->tx_desc_paddr);
	if (tdata->tx_desc_base == NULL) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		return ENOMEM;
	}

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct emx_txbuf) * tdata->num_tx_desc);
	tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tags for tx buffers
	 */
	error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    EMX_TSO_SIZE,		/* maxsize */
	    EMX_MAX_SCATTER,		/* nsegments */
	    EMX_MAX_SEGSIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,		/* flags */
	    &tdata->txtag);
	if (error) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		kfree(tdata->tx_buf, M_DEVBUF);
		tdata->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for tx buffers
	 */
	for (i = 0; i < tdata->num_tx_desc; i++) {
		tx_buffer = &tdata->tx_buf[i];

		error = bus_dmamap_create(tdata->txtag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &tx_buffer->map);
		if (error) {
			device_printf(dev, "Unable to create TX DMA map\n");
			emx_destroy_tx_ring(tdata, i);
			return error;
		}
	}

	/*
	 * Setup TX parameters
	 */
	tdata->spare_tx_desc = EMX_TX_SPARE;
	tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG;

	/*
	 * Keep the following relationship between spare_tx_desc,
	 * oact_tx_desc and tx_intr_nsegs:
	 * (spare_tx_desc + EMX_TX_RESERVED) <=
	 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs
	 */
	tdata->oact_tx_desc = tdata->num_tx_desc / 8;
	if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX)
		tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX;
	if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED)
		tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED;

	tdata->tx_intr_nsegs = tdata->num_tx_desc / 16;
	if (tdata->tx_intr_nsegs < tdata->oact_tx_desc)
		tdata->tx_intr_nsegs = tdata->oact_tx_desc;
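	/*
	 * Worked example (values illustrative only, assuming neither
	 * EMX_TX_OACTIVE_MAX nor the spare/reserved floor intervenes):
	 * with num_tx_desc = 512 the code above first proposes
	 * oact_tx_desc = 512 / 8 = 64 and tx_intr_nsegs = 512 / 16 = 32;
	 * tx_intr_nsegs is then raised to 64 so that a TX interrupt is
	 * always requested before the ring can go OACTIVE, preserving
	 * the invariant stated in the comment above.
	 */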
	/*
	 * Pull an extra 4 bytes into the first data segment for TSO,
	 * see: 82571/82572 specification update errata #7
	 *
	 * The same applies to I217 (and maybe I218 and I219).
	 *
	 * NOTE:
	 * 4 bytes instead of the 2 bytes mentioned in the errata are
	 * pulled, mainly to keep the rest of the data properly aligned.
	 */
	if (tdata->sc->hw.mac.type == e1000_82571 ||
	    tdata->sc->hw.mac.type == e1000_82572 ||
	    tdata->sc->hw.mac.type == e1000_pch_lpt ||
	    tdata->sc->hw.mac.type == e1000_pch_spt)
		tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX;

	return (0);
}

static void
emx_init_tx_ring(struct emx_txdata *tdata)
{
	/* Clear the old ring contents */
	bzero(tdata->tx_desc_base,
	    sizeof(struct e1000_tx_desc) * tdata->num_tx_desc);

	/* Reset state */
	tdata->next_avail_tx_desc = 0;
	tdata->next_tx_to_clean = 0;
	tdata->num_tx_desc_avail = tdata->num_tx_desc;

	tdata->tx_flags |= EMX_TXFLAG_ENABLED;
	if (tdata->sc->tx_ring_inuse > 1) {
		tdata->tx_flags |= EMX_TXFLAG_FORCECTX;
		if (bootverbose) {
			if_printf(&tdata->sc->arpcom.ac_if,
			    "TX %d force ctx setup\n", tdata->idx);
		}
	}
}

static void
emx_init_tx_unit(struct emx_softc *sc)
{
	uint32_t tctl, tarc, tipg = 0, txdctl;
	int i;

	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];
		uint64_t bus_addr;

		/* Setup the Base and Length of the Tx Descriptor Ring */
		bus_addr = tdata->tx_desc_paddr;
		E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i),
		    tdata->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);
		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0);
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (sc->hw.mac.type) {
	case e1000_80003es2lan:
		tipg = DEFAULT_82543_TIPG_IPGR1;
		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
		    E1000_TIPG_IPGR2_SHIFT;
		break;

	default:
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type ==
		    e1000_media_type_internal_serdes)
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	}

	E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg);

	/* NOTE: 0 is not allowed for TIDV */
	E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1);
	E1000_WRITE_REG(&sc->hw, E1000_TADV, 0);

	/*
	 * Errata workaround (obtained from Linux).  This is necessary
	 * to make multiple TX queues work on the 82574.
	 * XXX can't find it in any published errata though.
	 */
	txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0));
	E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl);

	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572) {
		tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
		tarc |= EMX_TARC_SPEED_MODE;
		E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
	} else if (sc->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&sc->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl);

	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan) {
		/* Bit 28 of TARC1 must be cleared when MULR is enabled */
		tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
		tarc &= ~(1 << 28);
		E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
	}

	if (sc->tx_ring_inuse > 1) {
		tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
		tarc &= ~EMX_TARC_COUNT_MASK;
		tarc |= 1;
		E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);

		tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
		tarc &= ~EMX_TARC_COUNT_MASK;
		tarc |= 1;
		E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
	}
}

static void
emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc)
{
	struct emx_txbuf *tx_buffer;
	int i;

	/* Free Transmit Descriptor ring */
	if (tdata->tx_desc_base) {
		bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap);
		bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base,
		    tdata->tx_desc_dmap);
		bus_dma_tag_destroy(tdata->tx_desc_dtag);

		tdata->tx_desc_base = NULL;
	}

	if (tdata->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; i++) {
		tx_buffer = &tdata->tx_buf[i];

		KKASSERT(tx_buffer->m_head == NULL);
		bus_dmamap_destroy(tdata->txtag, tx_buffer->map);
	}
	bus_dma_tag_destroy(tdata->txtag);

	kfree(tdata->tx_buf, M_DEVBUF);
	tdata->tx_buf = NULL;
}

/*
 * The offload context needs to be set when we transfer the first
 * packet of a particular protocol (TCP/UDP).  This routine has been
 * enhanced to deal with inserted VLAN headers.
 *
 * If the new packet's ether header length, ip header length and csum
 * offloading type are the same as those of the previous packet, we
 * should avoid allocating a new csum context descriptor, mainly to
 * take advantage of the pipeline effect of the TX data read request.
 *
 * This function returns the number of TX descriptors allocated for
 * the csum context.
 */
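/*
 * Offset example for a plain (untagged) Ethernet + IPv4 + TCP frame;
 * the numbers follow from the standard header sizes, not from anything
 * driver specific: csum_lhlen = 14 and csum_iphlen = 20, so the
 * context descriptor is filled with ipcss = 14, ipcse = 33 and
 * ipcso = 14 + 10 = 24, while the TCP checksum is described by
 * tucss = 34 and tucso = 34 + 16 = 50.  With an 802.1Q tag inserted
 * every one of these values simply grows by 4.
 */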
static int
emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int curr_txd, ehdrlen, csum_flags;
	uint32_t cmd, hdr_len, ip_hlen;

	csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
	ip_hlen = mp->m_pkthdr.csum_iphlen;
	ehdrlen = mp->m_pkthdr.csum_lhlen;

	if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
	    tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
	    tdata->csum_flags == csum_flags) {
		/*
		 * Same csum offload context as the previous packets;
		 * just return.
		 */
		*txd_upper = tdata->csum_txd_upper;
		*txd_lower = tdata->csum_txd_lower;
		return 0;
	}

	/*
	 * Setup a new csum offload context.
	 */

	curr_txd = tdata->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];

	cmd = 0;

	/* Setup of IP header checksum. */
	if (csum_flags & CSUM_IP) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
		cmd |= E1000_TXD_CMD_IP;
		*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}
	hdr_len = ehdrlen + ip_hlen;

	if (csum_flags & CSUM_TCP) {
		/*
		 * Start offset for payload checksum calculation.
		 * End offset for payload checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->upper_setup.tcp_fields.tucss = hdr_len;
		TXD->upper_setup.tcp_fields.tucse = htole16(0);
		TXD->upper_setup.tcp_fields.tucso =
		    hdr_len + offsetof(struct tcphdr, th_sum);
		cmd |= E1000_TXD_CMD_TCP;
		*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	} else if (csum_flags & CSUM_UDP) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->upper_setup.tcp_fields.tucss = hdr_len;
		TXD->upper_setup.tcp_fields.tucse = htole16(0);
		TXD->upper_setup.tcp_fields.tucso =
		    hdr_len + offsetof(struct udphdr, uh_sum);
		*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
	    E1000_TXD_DTYP_D;			/* Data descr */

	/* Save the information for this csum offloading context */
	tdata->csum_lhlen = ehdrlen;
	tdata->csum_iphlen = ip_hlen;
	tdata->csum_flags = csum_flags;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length =
	    htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}

static void
emx_txeof(struct emx_txdata *tdata)
{
	struct emx_txbuf *tx_buffer;
	int first, num_avail;

	if (tdata->tx_dd_head == tdata->tx_dd_tail)
		return;

	if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
		return;

	num_avail = tdata->num_tx_desc_avail;
	first = tdata->next_tx_to_clean;

	while (tdata->tx_dd_head != tdata->tx_dd_tail) {
		int dd_idx = tdata->tx_dd[tdata->tx_dd_head];
		struct e1000_tx_desc *tx_desc;

		tx_desc = &tdata->tx_desc_base[dd_idx];
		if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
			EMX_INC_TXDD_IDX(tdata->tx_dd_head);

			if (++dd_idx == tdata->num_tx_desc)
				dd_idx = 0;

			while (first != dd_idx) {
				logif(pkt_txclean);

				num_avail++;

				tx_buffer = &tdata->tx_buf[first];
				if (tx_buffer->m_head) {
					bus_dmamap_unload(tdata->txtag,
					    tx_buffer->map);
					m_freem(tx_buffer->m_head);
					tx_buffer->m_head = NULL;
				}

				if (++first == tdata->num_tx_desc)
					first = 0;
			}
		} else {
			break;
		}
	}
	tdata->next_tx_to_clean = first;
	tdata->num_tx_desc_avail = num_avail;

	if (tdata->tx_dd_head == tdata->tx_dd_tail) {
		tdata->tx_dd_head = 0;
		tdata->tx_dd_tail = 0;
	}

	if (!EMX_IS_OACTIVE(tdata)) {
		ifsq_clr_oactive(tdata->ifsq);

		/* All clean, turn off the timer */
		if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
			tdata->tx_watchdog.wd_timer = 0;
	}
}

static void
emx_tx_collect(struct emx_txdata *tdata)
{
	struct emx_txbuf *tx_buffer;
	int tdh, first, num_avail, dd_idx = -1;

	if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
		return;

	tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx));
	if (tdh == tdata->next_tx_to_clean)
		return;

	if (tdata->tx_dd_head != tdata->tx_dd_tail)
		dd_idx = tdata->tx_dd[tdata->tx_dd_head];

	num_avail = tdata->num_tx_desc_avail;
	first = tdata->next_tx_to_clean;

	while (first != tdh) {
		logif(pkt_txclean);

		num_avail++;

		tx_buffer = &tdata->tx_buf[first];
		if (tx_buffer->m_head) {
			bus_dmamap_unload(tdata->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}

		if (first == dd_idx) {
			EMX_INC_TXDD_IDX(tdata->tx_dd_head);
			if (tdata->tx_dd_head == tdata->tx_dd_tail) {
				tdata->tx_dd_head = 0;
				tdata->tx_dd_tail = 0;
				dd_idx = -1;
			} else {
				dd_idx = tdata->tx_dd[tdata->tx_dd_head];
			}
		}

		if (++first == tdata->num_tx_desc)
			first = 0;
	}
	tdata->next_tx_to_clean = first;
	tdata->num_tx_desc_avail = num_avail;

	if (!EMX_IS_OACTIVE(tdata)) {
		ifsq_clr_oactive(tdata->ifsq);

		/* All clean, turn off the timer */
		if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
			tdata->tx_watchdog.wd_timer = 0;
	}
}

/*
 * When the link is lost there is sometimes work still pending in the
 * TX ring, which would trigger a watchdog reset.  Rather than allowing
 * that, attempt a cleanup here and only reinit if it fails.  Note that
 * this has been seen mostly with fiber adapters.
 */
static void
emx_tx_purge(struct emx_softc *sc)
{
	int i;

	if (sc->link_active)
		return;

	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		if (tdata->tx_watchdog.wd_timer) {
			emx_tx_collect(tdata);
			if (tdata->tx_watchdog.wd_timer) {
				if_printf(&sc->arpcom.ac_if,
				    "Link lost, TX pending, reinit\n");
				emx_init(sc);
				return;
			}
		}
	}
}

static int
emx_newbuf(struct emx_rxdata *rdata, int i, int init)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct emx_rxbuf *rx_buffer;
	int error, nseg;

	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if (init) {
			if_printf(&rdata->sc->arpcom.ac_if,
			    "Unable to allocate RX mbuf\n");
		}
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(rdata->rxtag,
	    rdata->rx_sparemap, m,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&rdata->sc->arpcom.ac_if,
			    "Unable to load RX mbuf\n");
		}
		return (error);
	}

	rx_buffer = &rdata->rx_buf[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(rdata->rxtag, rx_buffer->map);

	map = rx_buffer->map;
	rx_buffer->map = rdata->rx_sparemap;
	rdata->rx_sparemap = map;

	rx_buffer->m_head = m;
	rx_buffer->paddr = seg.ds_addr;

	emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer);
	return (0);
}

static int
emx_create_rx_ring(struct emx_rxdata *rdata)
{
	device_t dev = rdata->sc->dev;
	struct emx_rxbuf *rx_buffer;
	int i, error, rsize, nrxd;

	/*
	 * Validate the number of receive descriptors.  It must not
	 * exceed the hardware maximum, and the resulting ring size
	 * must be a multiple of EMX_DBA_ALIGN.
	 */
	nrxd = device_getenv_int(dev, "rxd", emx_rxd);
	if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 ||
	    nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EMX_DEFAULT_RXD, nrxd);
		rdata->num_rx_desc = EMX_DEFAULT_RXD;
	} else {
		rdata->num_rx_desc = nrxd;
	}

	/*
	 * Allocate Receive Descriptor ring
	 */
	rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t),
	    EMX_DBA_ALIGN);
	rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag,
	    EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rdata->rx_desc_dtag, &rdata->rx_desc_dmap,
	    &rdata->rx_desc_paddr);
	if (rdata->rx_desc == NULL) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		return ENOMEM;
	}

	rsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct emx_rxbuf) * rdata->num_rx_desc);
	rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for rx buffers
	 */
	error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rdata->rxtag);
	if (error) {
		device_printf(dev, "Unable to allocate RX DMA tag\n");
		kfree(rdata->rx_buf, M_DEVBUF);
		rdata->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for rx buffers
	 */
	error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
	    &rdata->rx_sparemap);
	if (error) {
		device_printf(dev, "Unable to create spare RX DMA map\n");
		bus_dma_tag_destroy(rdata->rxtag);
		kfree(rdata->rx_buf, M_DEVBUF);
		rdata->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for rx buffers
	 */
	for (i = 0; i < rdata->num_rx_desc; i++) {
		rx_buffer = &rdata->rx_buf[i];

		error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "Unable to create RX DMA map\n");
			emx_destroy_rx_ring(rdata, i);
			return error;
		}
	}
	return (0);
}

static void
emx_free_rx_ring(struct emx_rxdata *rdata)
{
	int i;

	for (i = 0; i < rdata->num_rx_desc; i++) {
		struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i];

		if (rx_buffer->m_head != NULL) {
			bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
			m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}

	if (rdata->fmp != NULL)
		m_freem(rdata->fmp);
	rdata->fmp = NULL;
	rdata->lmp = NULL;
}

static void
emx_free_tx_ring(struct emx_txdata *tdata)
{
	int i;

	for (i = 0; i < tdata->num_tx_desc; i++) {
		struct emx_txbuf *tx_buffer = &tdata->tx_buf[i];

		if (tx_buffer->m_head != NULL) {
			bus_dmamap_unload(tdata->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
	}

	tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX;

	tdata->csum_flags = 0;
	tdata->csum_lhlen = 0;
	tdata->csum_iphlen = 0;
	tdata->csum_thlen = 0;
	tdata->csum_mss = 0;
	tdata->csum_pktlen = 0;

	tdata->tx_dd_head = 0;
	tdata->tx_dd_tail = 0;
	tdata->tx_nsegs = 0;
}

static int
emx_init_rx_ring(struct emx_rxdata *rdata)
{
	int i, error;

	/* Reset descriptor ring */
	bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);

	/* Allocate new ones. */
	for (i = 0; i < rdata->num_rx_desc; i++) {
		error = emx_newbuf(rdata, i, 1);
		if (error)
			return (error);
	}

	/* Setup our descriptor pointers */
	rdata->next_rx_desc_to_check = 0;

	return (0);
}

static void
emx_init_rx_unit(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint64_t bus_addr;
	uint32_t rctl, itr, rfctl;
	int i;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/*
	 * Set the interrupt throttling rate.  The value is calculated
	 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns).
	 */
	if (sc->int_throttle_ceil)
		itr = 1000000000 / 256 / sc->int_throttle_ceil;
	else
		itr = 0;
	emx_set_itr(sc, itr);

	/* Use extended RX descriptor */
	rfctl = E1000_RFCTL_EXTEN;

	/* Disable accelerated acknowledge */
	if (sc->hw.mac.type == e1000_82574)
		rfctl |= E1000_RFCTL_ACK_DIS;

	E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);

	/*
	 * Receive Checksum Offload for TCP and UDP
	 *
	 * Checksum offloading is also enabled if multiple receive
	 * queues are to be supported, since we need it to figure out
	 * the packet type.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) ||
	    sc->rx_ring_cnt > 1) {
		uint32_t rxcsum;

		rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);

		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD;
		E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	 * Configure multiple receive queues (RSS)
	 */
	if (sc->rx_ring_cnt > 1) {
		uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
		int r, j;

		KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
		    ("invalid number of RX rings (%d)", sc->rx_ring_cnt));

		/*
		 * NOTE:
		 * When we reach here, RSS has already been disabled
		 * in emx_stop(), so we can safely configure the RSS
		 * key and redirect table.
		 */

		/*
		 * Configure RSS key
		 */
		toeplitz_get_key(key, sizeof(key));
		for (i = 0; i < EMX_NRSSRK; ++i) {
			uint32_t rssrk;

			rssrk = EMX_RSSRK_VAL(key, i);
			EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);

			E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
		}

		/*
		 * Configure RSS redirect table.
		 */
		if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table,
		    EMX_RDRTABLE_SIZE);

		r = 0;
		for (j = 0; j < EMX_NRETA; ++j) {
			uint32_t reta = 0;

			for (i = 0; i < EMX_RETA_SIZE; ++i) {
				uint32_t q;

				q = sc->rdr_table[r] <<
				    EMX_RETA_RINGIDX_SHIFT;
				reta |= q << (8 * i);
				++r;
			}
			EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
			E1000_WRITE_REG(&sc->hw, E1000_RETA(j), reta);
		}

		/*
		 * Enable multiple receive queues.
		 * Enable IPv4 RSS standard hash functions.
		 * Disable RSS interrupt.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_MRQC,
		    E1000_MRQC_ENABLE_RSS_2Q |
		    E1000_MRQC_RSS_FIELD_IPV4_TCP |
		    E1000_MRQC_RSS_FIELD_IPV4);
	}
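	/*
	 * Redirect table sketch (an illustration under assumed macro
	 * values, not authoritative): each 32-bit RETA register packs
	 * EMX_RETA_SIZE one-byte entries, each holding a ring index
	 * shifted left by EMX_RETA_RINGIDX_SHIFT.  With two RX rings,
	 * a redirect table alternating 0,1,0,1,... and an assumed
	 * shift of 7, every RETA register written in the loop above
	 * would come out as 0x80008000.
	 */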
	/*
	 * XXX TEMPORARY WORKAROUND: on some systems with 82573 long
	 * latencies are observed, e.g. on the Lenovo X60.  This change
	 * eliminates the problem, but since having positive values in
	 * RDTR is a known source of problems on other platforms,
	 * another solution is being sought.
	 */
	if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
		E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
		E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
	}

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct emx_rxdata *rdata = &sc->rx_data[i];

		/*
		 * Setup the Base and Length of the Rx Descriptor Ring
		 */
		bus_addr = rdata->rx_desc_paddr;
		E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
		    rdata->num_rx_desc * sizeof(emx_rxdesc_t));
		E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);

		/*
		 * Setup the HW Rx Head and Tail Descriptor Pointers
		 */
		E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
		    sc->rx_data[i].num_rx_desc - 1);
	}

	if (sc->hw.mac.type >= e1000_pch2lan) {
		if (ifp->if_mtu > ETHERMTU)
			e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE);
		else
			e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE);
	}

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
	    (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* Don't store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* MCLBYTES */
	rctl |= E1000_RCTL_SZ_2048;

	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable Receives */
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
}

static void
emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc)
{
	struct emx_rxbuf *rx_buffer;
	int i;

	/* Free Receive Descriptor ring */
	if (rdata->rx_desc) {
		bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
		bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
		    rdata->rx_desc_dmap);
		bus_dma_tag_destroy(rdata->rx_desc_dtag);

		rdata->rx_desc = NULL;
	}

	if (rdata->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc; i++) {
		rx_buffer = &rdata->rx_buf[i];

		KKASSERT(rx_buffer->m_head == NULL);
		bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
	}
	bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
	bus_dma_tag_destroy(rdata->rxtag);

	kfree(rdata->rx_buf, M_DEVBUF);
	rdata->rx_buf = NULL;
}

static void
emx_rxeof(struct emx_rxdata *rdata, int count)
{
	struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
	uint32_t staterr;
	emx_rxdesc_t *current_desc;
	struct mbuf *mp;
	int i, cpuid = mycpuid;

	i = rdata->next_rx_desc_to_check;
	current_desc = &rdata->rx_desc[i];
	staterr = le32toh(current_desc->rxd_staterr);

	if (!(staterr & E1000_RXD_STAT_DD))
		return;

	while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
		struct pktinfo *pi = NULL, pi0;
		struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
		struct mbuf *m = NULL;
		int eop, len;

		logif(pkt_receive);

		mp = rx_buf->m_head;

		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(rdata->rxtag, rx_buf->map,
		    BUS_DMASYNC_POSTREAD);

		len = le16toh(current_desc->rxd_length);
		if (staterr & E1000_RXD_STAT_EOP) {
			count--;
			eop = 1;
		} else {
			eop = 0;
		}

		if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
			uint16_t vlan = 0;
			uint32_t mrq, rss_hash;

			/*
			 * Save the necessary information before
			 * emx_newbuf() destroys it.
			 */
			if ((staterr & E1000_RXD_STAT_VP) && eop)
				vlan = le16toh(current_desc->rxd_vlan);

			mrq = le32toh(current_desc->rxd_mrq);
			rss_hash = le32toh(current_desc->rxd_rss);

			EMX_RSS_DPRINTF(rdata->sc, 10,
			    "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
			    rdata->idx, mrq, rss_hash);

			if (emx_newbuf(rdata, i, 0) != 0) {
				IFNET_STAT_INC(ifp, iqdrops, 1);
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (rdata->fmp == NULL) {
				mp->m_pkthdr.len = len;
				rdata->fmp = mp; /* Store the first mbuf */
				rdata->lmp = mp;
			} else {
				/*
				 * Chain mbufs together
				 */
				rdata->lmp->m_next = mp;
				rdata->lmp = rdata->lmp->m_next;
				rdata->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				rdata->fmp->m_pkthdr.rcvif = ifp;
				IFNET_STAT_INC(ifp, ipackets, 1);

				if (ifp->if_capenable & IFCAP_RXCSUM)
					emx_rxcsum(staterr, rdata->fmp);

				if (staterr & E1000_RXD_STAT_VP) {
					rdata->fmp->m_pkthdr.ether_vlantag =
					    vlan;
					rdata->fmp->m_flags |= M_VLANTAG;
				}
				m = rdata->fmp;
				rdata->fmp = NULL;
				rdata->lmp = NULL;

				if (ifp->if_capenable & IFCAP_RSS) {
					pi = emx_rssinfo(m, &pi0, mrq,
					    rss_hash, staterr);
				}
#ifdef EMX_RSS_DEBUG
				rdata->rx_pkts++;
#endif
			}
		} else {
			IFNET_STAT_INC(ifp, ierrors, 1);
discard:
			emx_setup_rxdesc(current_desc, rx_buf);
			if (rdata->fmp != NULL) {
				m_freem(rdata->fmp);
				rdata->fmp = NULL;
				rdata->lmp = NULL;
			}
			m = NULL;
		}

		if (m != NULL)
			ifp->if_input(ifp, m, pi, cpuid);

		/* Advance our pointers to the next descriptor. */
		if (++i == rdata->num_rx_desc)
			i = 0;

		current_desc = &rdata->rx_desc[i];
		staterr = le32toh(current_desc->rxd_staterr);
	}
	rdata->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue "Tail Pointer". */
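	/*
	 * The tail is left one descriptor behind the next one to be
	 * checked, i.e. at the last slot that was refilled above, so
	 * that the ring never appears completely full to the hardware.
	 */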
	if (--i < 0)
		i = rdata->num_rx_desc - 1;
	E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i);
}

static void
emx_enable_intr(struct emx_softc *sc)
{
	uint32_t ims_mask = IMS_ENABLE_MASK;

	lwkt_serialize_handler_enable(&sc->main_serialize);

#if 0
	if (sc->hw.mac.type == e1000_82574) {
		E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK);
		ims_mask |= EM_MSIX_MASK;
	}
#endif
	E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask);
}

static void
emx_disable_intr(struct emx_softc *sc)
{
	if (sc->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0);
	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);

	lwkt_serialize_handler_disable(&sc->main_serialize);
}

/*
 * Bit of a misnomer: what this really means is to enable OS
 * management of the system, i.e. to disable special hardware
 * management features.
 */
static void
emx_get_mgmt(struct emx_softc *sc)
{
	/* A shared code workaround */
	if (sc->flags & EMX_FLAG_HAS_MGMT) {
		int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
		manc2h |= E1000_MNG2HOST_PORT_623;
		manc2h |= E1000_MNG2HOST_PORT_664;
		E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);

		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to the hardware management
 * controller if there is one.
 */
static void
emx_rel_mgmt(struct emx_softc *sc)
{
	if (sc->flags & EMX_FLAG_HAS_MGMT) {
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * emx_get_hw_control() sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.  For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is open.
 */
static void
emx_get_hw_control(struct emx_softc *sc)
{
	/* Let firmware know the driver has taken over */
	if (sc->hw.mac.type == e1000_82573) {
		uint32_t swsm;

		swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
		E1000_WRITE_REG(&sc->hw, E1000_SWSM,
		    swsm | E1000_SWSM_DRV_LOAD);
	} else {
		uint32_t ctrl_ext;

		ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
	sc->flags |= EMX_FLAG_HW_CTRL;
}

/*
 * emx_rel_hw_control() resets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.  For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is closed.
 */
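/*
 * The two DRV_LOAD helpers are used as a pair: emx_get_hw_control()
 * is expected to be called when the driver takes ownership of the
 * device (attach/init paths) and emx_rel_hw_control() on the way down
 * (stop/detach paths); the EMX_FLAG_HW_CTRL check below keeps the
 * release idempotent.
 */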
static void
emx_rel_hw_control(struct emx_softc *sc)
{
	if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
		return;
	sc->flags &= ~EMX_FLAG_HW_CTRL;

	/* Let firmware take over control of the h/w */
	if (sc->hw.mac.type == e1000_82573) {
		uint32_t swsm;

		swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
		E1000_WRITE_REG(&sc->hw, E1000_SWSM,
		    swsm & ~E1000_SWSM_DRV_LOAD);
	} else {
		uint32_t ctrl_ext;

		ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

static int
emx_is_valid_eaddr(const uint8_t *addr)
{
	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return (FALSE);

	return (TRUE);
}

/*
 * Enable PCI Wake On Lan capability
 */
void
emx_enable_wol(device_t dev)
{
	uint16_t cap, status;
	uint8_t id;

	/* First find the capabilities pointer */
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);

	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)	/* Something wrong */
		return;

	/*
	 * OK, we have the power capabilities,
	 * so now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
}

static void
emx_update_stats(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (sc->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
		sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
	}
	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
	sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
	sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
	sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);

	sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
	sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
	sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
	sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
	sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
	sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
	sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
	sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
	sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
	sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
	sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
	sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
	sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
	sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
	sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);

	/*
	 * For the 64-bit byte counters the low dword must be read first;
	 * both registers clear on the read of the high dword.
	 */
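	/*
	 * NOTE: the code below only accumulates the high dwords
	 * (GORCH/GOTCH), discarding the low 32 bits.  A full 64-bit
	 * accumulation, if it were desired, would look like the
	 * following sketch (low dword read first, per the comment
	 * above; register names from the shared e1000 headers):
	 */
#if 0
	{
		uint32_t lo, hi;

		lo = E1000_READ_REG(&sc->hw, E1000_GORCL);
		hi = E1000_READ_REG(&sc->hw, E1000_GORCH);
		sc->stats.gorc += ((uint64_t)hi << 32) | lo;
	}
#endif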
	sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH);
	sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH);

	sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
	sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
	sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
	sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
	sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);

	sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
	sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);

	sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
	sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
	sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
	sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);

	sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
	sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC);
	sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS);
	sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR);
	sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC);
	sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC);

	IFNET_STAT_SET(ifp, collisions, sc->stats.colc);

	/* Rx Errors */
	IFNET_STAT_SET(ifp, ierrors,
	    sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc +
	    sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr);

	/* Tx Errors */
	IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol);
}

static void
emx_print_debug_info(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	uint8_t *hw_addr = sc->hw.hw_addr;
	int i;

	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
	    E1000_READ_REG(&sc->hw, E1000_CTRL),
	    E1000_READ_REG(&sc->hw, E1000_RCTL));
	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
	    ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),
	    (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff));
	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
	    sc->hw.fc.high_water, sc->hw.fc.low_water);
	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
	    E1000_READ_REG(&sc->hw, E1000_TIDV),
	    E1000_READ_REG(&sc->hw, E1000_TADV));
	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
	    E1000_READ_REG(&sc->hw, E1000_RDTR),
	    E1000_READ_REG(&sc->hw, E1000_RADV));

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i,
		    E1000_READ_REG(&sc->hw, E1000_TDH(i)),
		    E1000_READ_REG(&sc->hw, E1000_TDT(i)));
	}
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i,
		    E1000_READ_REG(&sc->hw, E1000_RDH(i)),
		    E1000_READ_REG(&sc->hw, E1000_RDT(i)));
	}

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		device_printf(dev, "TX %d Tx descriptors avail = %d\n", i,
		    sc->tx_data[i].num_tx_desc_avail);
		device_printf(dev, "TX %d TSO segments = %lu\n", i,
		    sc->tx_data[i].tso_segments);
		device_printf(dev, "TX %d TSO ctx reused = %lu\n", i,
		    sc->tx_data[i].tso_ctx_reused);
	}
}

static void
emx_print_hw_stats(struct emx_softc *sc)
{
	device_t dev = sc->dev;

	device_printf(dev, "Excessive collisions = %lld\n",
	    (long long)sc->stats.ecol);
#if (DEBUG_HW > 0)	/* Don't output these errors normally */
	device_printf(dev, "Symbol errors = %lld\n",
	    (long long)sc->stats.symerrs);
#endif
	device_printf(dev, "Sequence errors = %lld\n",
	    (long long)sc->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
	    (long long)sc->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	    (long long)sc->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	    (long long)sc->stats.rnbc);
	/* RLEC is inaccurate on some hardware, so calculate our own. */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    ((long long)sc->stats.roc + (long long)sc->stats.ruc));
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)sc->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
	    (long long)sc->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)sc->stats.algnerrc);
	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
	    (long long)sc->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns);
	device_printf(dev, "XON Rcvd = %lld\n",
	    (long long)sc->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	    (long long)sc->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	    (long long)sc->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	    (long long)sc->stats.xofftxc);
	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)sc->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)sc->stats.gptc);
}

static void
emx_print_nvm_info(struct emx_softc *sc)
{
	uint16_t eeprom_data;
	int i, j, row = 0;

	/* It's a bit crude, but it gets the job done */
	kprintf("\nInterface EEPROM Dump:\n");
	kprintf("Offset\n0x0000 ");
	for (i = 0, j = 0; i < 32; i++, j++) {
		if (j == 8) { /* Make the offset block */
			j = 0; ++row;
			kprintf("\n0x00%x0 ", row);
		}
		e1000_read_nvm(&sc->hw, i, 1, &eeprom_data);
		kprintf("%04x ", eeprom_data);
	}
	kprintf("\n");
}
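/*
 * The sysctl handlers below are triggered by writing a magic value;
 * e.g. (assuming the first adapter is unit 0, so the node name is an
 * assumption):
 *
 *	sysctl dev.emx.0.debug=1	# dump register/ring state
 *	sysctl dev.emx.0.debug=2	# hex dump the first 32 EEPROM words
 *	sysctl dev.emx.0.stats=1	# dump MAC statistics
 */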
static int
emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc;
	struct ifnet *ifp;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	sc = (struct emx_softc *)arg1;
	ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	if (result == 1)
		emx_print_debug_info(sc);

	/*
	 * This value will cause a hex dump of the
	 * first 32 16-bit words of the EEPROM to
	 * the screen.
	 */
	if (result == 2)
		emx_print_nvm_info(sc);

	ifnet_deserialize_all(ifp);

	return (error);
}

static int
emx_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		struct emx_softc *sc = (struct emx_softc *)arg1;
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		emx_print_hw_stats(sc);
		ifnet_deserialize_all(ifp);
	}
	return (error);
}

static void
emx_add_sysctl(struct emx_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
#if defined(EMX_RSS_DEBUG) || defined(EMX_TSS_DEBUG)
	char pkt_desc[32];
	int i;
#endif

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0,
	    "# of TX descs");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_int_throttle, "I", "interrupt throttling rate");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_wreg_nsegs, "I",
	    "# segments sent before write to hardware register");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0,
	    "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0,
	    "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "TX polling CPU map");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "RX polling CPU map");
#endif

#ifdef EMX_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug,
	    0, "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts,
		    "RXed packets");
	}
#endif
#ifdef EMX_TSS_DEBUG
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts,
		    "TXed packets");
	}
#endif
}
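/*
 * Worked example for the handler below (the numbers are illustrative):
 * a requested ceiling of 6000 interrupts/sec maps to an ITR value of
 * 1000000000 / 256 / 6000 = 651, i.e. one interrupt every
 * 651 * 256ns ~= 166.7us.  The ceiling is then recomputed from the
 * register value so that sysctl reports the exact effective rate;
 * e.g. a request of 100000/sec is stored back as 100160/sec.
 */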
static int
emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, throttle;

	throttle = sc->int_throttle_ceil;
	error = sysctl_handle_int(oidp, &throttle, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (throttle < 0 || throttle > 1000000000 / 256)
		return EINVAL;

	if (throttle) {
		/*
		 * Set the interrupt throttling rate in 256ns increments,
		 * then recalculate the sysctl value assignment to get the
		 * exact frequency.
		 */
		throttle = 1000000000 / 256 / throttle;

		/* The upper 16 bits of ITR are reserved and must be zero */
		if (throttle & 0xffff0000)
			return EINVAL;
	}

	ifnet_serialize_all(ifp);

	if (throttle)
		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	else
		sc->int_throttle_ceil = 0;

	if (ifp->if_flags & IFF_RUNNING)
		emx_set_itr(sc, throttle);

	ifnet_deserialize_all(ifp);

	if (bootverbose) {
		if_printf(ifp, "Interrupt moderation set to %d/sec\n",
		    sc->int_throttle_ceil);
	}
	return 0;
}

static int
emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct emx_txdata *tdata = &sc->tx_data[0];
	int error, segs;

	segs = tdata->tx_intr_nsegs;
	error = sysctl_handle_int(oidp, &segs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (segs <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	/*
	 * Don't allow tx_intr_nsegs to become:
	 * o  Less than oact_tx_desc
	 * o  So large that no TX desc will cause a TX interrupt to
	 *    be generated (OACTIVE will never recover)
	 * o  So small that it causes tx_dd[] to overflow
	 */
	if (segs < tdata->oact_tx_desc ||
	    segs >= tdata->num_tx_desc - tdata->oact_tx_desc ||
	    segs < tdata->num_tx_desc / EMX_TXDD_SAFE) {
		error = EINVAL;
	} else {
		int i;

		error = 0;
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			sc->tx_data[i].tx_intr_nsegs = segs;
	}

	ifnet_deserialize_all(ifp);

	return error;
}

static int
emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->tx_data[0].tx_wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_data[i].tx_wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

static int
emx_dma_alloc(struct emx_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_dtag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate transmit descriptor rings and buffers
	 */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		error = emx_create_tx_ring(&sc->tx_data[i]);
		if (error) {
static int
emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, throttle;

	throttle = sc->int_throttle_ceil;
	error = sysctl_handle_int(oidp, &throttle, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (throttle < 0 || throttle > 1000000000 / 256)
		return EINVAL;

	if (throttle) {
		/*
		 * Convert the requested rate into 256ns increments for
		 * the ITR register; the ceiling is recalculated from the
		 * register value below, so the sysctl reports the exact
		 * frequency that was actually programmed.
		 */
		throttle = 1000000000 / 256 / throttle;

		/* The upper 16 bits of ITR are reserved and must be zero */
		if (throttle & 0xffff0000)
			return EINVAL;
	}

	ifnet_serialize_all(ifp);

	if (throttle)
		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	else
		sc->int_throttle_ceil = 0;

	if (ifp->if_flags & IFF_RUNNING)
		emx_set_itr(sc, throttle);

	ifnet_deserialize_all(ifp);

	if (bootverbose) {
		if_printf(ifp, "Interrupt moderation set to %d/sec\n",
		    sc->int_throttle_ceil);
	}
	return 0;
}

static int
emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct emx_txdata *tdata = &sc->tx_data[0];
	int error, segs;

	segs = tdata->tx_intr_nsegs;
	error = sysctl_handle_int(oidp, &segs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (segs <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	/*
	 * Don't allow tx_intr_nsegs to become:
	 * o  Less than oact_tx_desc
	 * o  So large that no TX descriptor would trigger a TX
	 *    interrupt (OACTIVE would never be recovered)
	 * o  So small that it would overflow tx_dd[]
	 */
	if (segs < tdata->oact_tx_desc ||
	    segs >= tdata->num_tx_desc - tdata->oact_tx_desc ||
	    segs < tdata->num_tx_desc / EMX_TXDD_SAFE) {
		error = EINVAL;
	} else {
		int i;

		error = 0;
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			sc->tx_data[i].tx_intr_nsegs = segs;
	}

	ifnet_deserialize_all(ifp);

	return error;
}

static int
emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->tx_data[0].tx_wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_data[i].tx_wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

static int
emx_dma_alloc(struct emx_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_dtag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate the transmit descriptor rings and buffers
	 */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		error = emx_create_tx_ring(&sc->tx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup transmit structures\n");
			return error;
		}
	}

	/*
	 * Allocate the receive descriptor rings and buffers
	 */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		error = emx_create_rx_ring(&sc->rx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup receive structures\n");
			return error;
		}
	}
	return 0;
}

static void
emx_dma_free(struct emx_softc *sc)
{
	int i;

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		emx_destroy_tx_ring(&sc->tx_data[i],
		    sc->tx_data[i].num_tx_desc);
	}

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		emx_destroy_rx_ring(&sc->rx_data[i],
		    sc->rx_data[i].num_rx_desc);
	}

	/* Free top level busdma tag */
	if (sc->parent_dtag != NULL)
		bus_dma_tag_destroy(sc->parent_dtag);
}

static void
emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
}

static int
emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_serialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
}

static void
emx_deserialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
}

#ifdef INVARIANTS

static void
emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
	    slz, serialized);
}

#endif	/* INVARIANTS */

#ifdef IFPOLL_ENABLE

static void
emx_npoll_status(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);
		callout_reset(&sc->timer, hz, emx_timer, sc);
	}
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct emx_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->tx_serialize);

	emx_txeof(tdata);
	if (!ifsq_is_empty(tdata->ifsq))
		ifsq_devstart(tdata->ifsq);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct emx_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->rx_serialize);

	emx_rxeof(rdata, cycle);
}
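
/*
 * ifpoll(4) registration hook.  With a non-NULL info, polling is
 * being enabled: each active TX/RX ring is registered on the cpu its
 * ringmap assigns, so the per-ring poll handlers above always run on
 * fixed cpus.  With a NULL info, polling is being disabled and the
 * TX queues fall back to the cpu of the interrupt resource.  The
 * interface is reinitialized afterwards so ring usage matches the
 * new mode.
 */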
static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct emx_softc *sc = ifp->if_softc;
	int i, txr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int cpu;

		info->ifpi_status.status_func = emx_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = emx_get_txring_inuse(sc, TRUE);
		for (i = 0; i < txr_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_tx[cpu].poll_func = emx_npoll_tx;
			info->ifpi_tx[cpu].arg = tdata;
			info->ifpi_tx[cpu].serializer = &tdata->tx_serialize;
			ifsq_set_cpuid(tdata->ifsq, cpu);
		}

		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct emx_rxdata *rdata = &sc->rx_data[i];

			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_rx[cpu].poll_func = emx_npoll_rx;
			info->ifpi_rx[cpu].arg = rdata;
			info->ifpi_rx[cpu].serializer = &rdata->rx_serialize;
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			ifsq_set_cpuid(tdata->ifsq,
			    rman_get_cpuid(sc->intr_res));
		}
	}
	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);
}

#endif	/* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
	E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
	if (sc->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When MSI-X interrupts are used, throttling must also
		 * be applied through the per-vector EITR registers.
		 */
		for (i = 0; i < 4; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
	}
}

/*
 * Disable L0s; 82574L errata #20
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = sc->dev;

	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8  disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
		/*
		 * 82574 specification update errata #20
		 *
		 * There is no need to disable L1.
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}

static int
emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
{
	int iphlen, hoff, thoff, ex = 0;
	struct mbuf *m;
	struct ip *ip;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
		ex = 4;

	if (m->m_len < hoff + iphlen + thoff + ex) {
		m = m_pullup(m, hoff + iphlen + thoff + ex);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	ip->ip_len = 0;

	return 0;
}
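
/*
 * Program a TSO context descriptor, or reuse the previously
 * programmed one when the header layout, mss and packet length are
 * unchanged.  Returns the number of descriptors consumed, i.e. 1
 * when a new context is written and 0 on reuse.
 *
 * Worked example for a plain Ethernet + IPv4 + TCP frame
 * (hoff = 14, iphlen = 20, thoff = 20):
 *   ipcss = 14, ipcse = 33,
 *   ipcso = 14 + offsetof(struct ip, ip_sum) = 24,
 *   tucss = 34, tucse = 0 (checksum to the end of the packet),
 *   tucso = 34 + offsetof(struct tcphdr, th_sum) = 50,
 *   hdr_len = 54.
 */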
static int
emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int hoff, iphlen, thoff, hlen;
	int mss, pktlen, curr_txd;

#ifdef EMX_TSO_DEBUG
	tdata->tso_segments++;
#endif

	iphlen = mp->m_pkthdr.csum_iphlen;
	thoff = mp->m_pkthdr.csum_thlen;
	hoff = mp->m_pkthdr.csum_lhlen;
	mss = mp->m_pkthdr.tso_segsz;
	pktlen = mp->m_pkthdr.len;

	if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
	    tdata->csum_flags == CSUM_TSO &&
	    tdata->csum_iphlen == iphlen &&
	    tdata->csum_lhlen == hoff &&
	    tdata->csum_thlen == thoff &&
	    tdata->csum_mss == mss &&
	    tdata->csum_pktlen == pktlen) {
		*txd_upper = tdata->csum_txd_upper;
		*txd_lower = tdata->csum_txd_lower;
#ifdef EMX_TSO_DEBUG
		tdata->tso_ctx_reused++;
#endif
		return 0;
	}
	hlen = hoff + iphlen + thoff;

	/*
	 * Setup a new TSO context.
	 */

	curr_txd = tdata->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
		     E1000_TXD_DTYP_D |		/* Data descr type */
		     E1000_TXD_CMD_TSE;		/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

	/*
	 * Start offset for header checksum calculation.
	 * End offset for header checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->lower_setup.ip_fields.ipcss = hoff;
	TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
	TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);

	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    hoff + iphlen + offsetof(struct tcphdr, th_sum);

	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mss);
	TXD->tcp_seg_setup.fields.hdr_len = hlen;
	TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
	    E1000_TXD_CMD_DEXT |	/* Extended descr */
	    E1000_TXD_CMD_TSE |		/* TSE context */
	    E1000_TXD_CMD_IP |		/* Do IP csum */
	    E1000_TXD_CMD_TCP |		/* Do TCP checksum */
	    (pktlen - hlen));		/* Total len */

	/* Save the information for this TSO context */
	tdata->csum_flags = CSUM_TSO;
	tdata->csum_lhlen = hoff;
	tdata->csum_iphlen = iphlen;
	tdata->csum_thlen = thoff;
	tdata->csum_mss = mss;
	tdata->csum_pktlen = pktlen;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}

static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
	if (polling)
		return sc->tx_ring_cnt;
	else
		return 1;
}
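
/*
 * Note that only the polling path ever uses more than one TX ring:
 * in interrupt mode emx_get_txring_inuse() reports a single ring, so
 * all transmission is funneled through tx_data[0].
 */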