/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),
	EMX_DEVICE(PCH_I218_LM2),
	EMX_DEVICE(PCH_I218_V2),
	EMX_DEVICE(PCH_I218_LM3),
	EMX_DEVICE(PCH_I218_V3),
	EMX_DEVICE(PCH_SPT_I219_LM),
	EMX_DEVICE(PCH_SPT_I219_V),
	EMX_DEVICE(PCH_SPT_I219_LM2),
	EMX_DEVICE(PCH_SPT_I219_V2),

	/* required last entry */
	EMX_DEVICE_NULL
};
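
/*
 * Added commentary (not from the original sources): EMX_DEVICE() relies
 * on token pasting and string-literal concatenation, so for example
 *
 *	EMX_DEVICE(82571EB_COPPER)
 *
 * expands to
 *
 *	{ EMX_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER,
 *	  "Intel(R) PRO/1000 82571EB_COPPER" }
 *
 * which is what lets emx_probe() match on vid/did and pass the
 * description string straight to device_set_desc().
 */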

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

static char	emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_RXPAUSE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}
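
/*
 * Added commentary: in emx_rxcsum() above, "IPCS set while IPE is
 * clear" means the hardware both checked the IPv4 header checksum and
 * found no error, which is why CSUM_IP_CHECKED and CSUM_IP_VALID are
 * raised together.  The TCP/UDP case likewise requires TCPCS without
 * TCPE before claiming CSUM_DATA_VALID, and csum_data is set to 0xffff
 * so upper layers treat the pseudo-header checksum as already verified.
 */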

static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable, tx_ring_max;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
	char flowctrl[IFM_ETH_FC_STRLEN];
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	/*
	 * Setup RX rings
	 */
	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	/*
	 * Setup TX ring
	 */
	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;

	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev,
		    "Unable to allocate bus resource: interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

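		/*
		 * Added commentary, illustrative arithmetic only: with
		 * a hypothetical ceiling of 10000 interrupts/s, the
		 * division below gives 1000000000 / 256 / 10000 = 390,
		 * the raw ITR value, and converting back yields
		 * 1000000000 / 256 / 390 = 10016, the exact interrupt
		 * frequency the hardware will actually use.
		 */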
		/*
		 * Recalculate the tunable value to get the exact
		 * frequency.
		 */
		throttle = 1000000000 / 256 / throttle;

		/* The upper 16 bits of the ITR are reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}

	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/* Calculate # of RX rings */
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING);

	/*
	 * Calculate # of TX rings
	 *
	 * XXX
	 * I217/I218 claims to have 2 TX queues
	 *
	 * NOTE:
	 * Don't enable multiple TX queues on 82574; it always gives
	 * watchdog timeout on TX queue0, when multiple TCP streams are
	 * received.  It was originally suspected that the hardware TX
	 * checksum offloading caused this watchdog timeout, since only
	 * TCP ACKs are sent during TCP receiving tests.  However, even
	 * if the hardware TX checksum offloading is disabled, TX queue0
	 * still triggers the watchdog timeout.
	 */
	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan ||
	    sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_pch_spt ||
	    sc->hw.mac.type == e1000_82574)
		tx_ring_max = EMX_NTX_RING;
	sc->tx_ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, tx_ring_max);

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address from the hardware.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev,
		    "EEPROM read error while reading MAC address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port sc, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	if (sc->tx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->tx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->tx_npoll_off = offset;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);
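
	/*
	 * Added commentary (an editorial reading of the checks above,
	 * not original driver text): npoll.rxoff/npoll.txoff are forced
	 * to be multiples of the ring count, presumably so that the
	 * rings of one unit occupy a contiguous, aligned group of CPUs
	 * starting at the offset.
	 */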

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    emx_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree; this must be done after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset; call it again,
		 * if it fails a second time it's a real issue.
		 */
		error = emx_reset(sc);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(&sc->arpcom.ac_if);
			goto fail;
		}
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}
	return (0);
fail:
	emx_detach(dev);
	return (error);
}

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
		    sc->flash);
	}

	emx_dma_free(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}
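
/*
 * Added commentary: the suspend path above arms Wake On LAN by writing
 * the selected wake filters (sc->wol, e.g. E1000_WUFC_MAG for magic
 * packets) into WUFC and setting WUC.PME_EN before emx_enable_wol()
 * does the PCI power-management plumbing; emx_detach() uses the same
 * sequence.
 */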

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Do we at least have the minimal number of free descriptors? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time would make the opackets
		 * statistics vastly inaccurate; we do the opackets increment
		 * now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
}
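
/*
 * Added commentary: emx_start() above batches doorbell writes; TDT is
 * only bumped once at least tx_wreg_nsegs descriptors have been queued
 * since the last write (or when the loop exits with work pending),
 * trading a little latency for far fewer register writes on a busy
 * queue.
 */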

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */

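	/*
	 * Added commentary: TDH is the hardware consumer index and TDT
	 * the software producer index of the TX ring, so TDT == TDH
	 * below means the hardware has caught up with everything that
	 * was ever queued and the ring is simply idle.
	 */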
	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		tdata->tx_watchdog.wd_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest mac address; the user can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571, RAR[0] may be overwritten when the other
	 * port is reset, so we make a duplicate in RAR[14] for that
	 * eventuality; this assures the interface continues to
	 * function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_data[i].ifsq);
		ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog);
	}

	callout_reset(&sc->timer, hz, emx_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling)
		emx_disable_intr(sc);
	else
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);
}

static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle.  The ICR register
	 * reports all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

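	/*
	 * Added commentary: only main_serialize is held on entry (see
	 * the assertion above), so the RX/TX processing below enters
	 * each ring's own serializer before touching per-ring state,
	 * matching the split serializer scheme set up in emx_attach().
	 */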
	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(&sc->rx_data[i], -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			struct emx_txdata *tdata = &sc->tx_data[0];

			lwkt_serialize_enter(&tdata->tx_serialize);
			emx_txeof(tdata);
			if (!ifsq_is_empty(tdata->ifsq))
				ifsq_devstart(tdata->ifsq);
			lwkt_serialize_exit(&tdata->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	if (ifmr->ifm_active & IFM_FDX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}

static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);

	return (0);
}

static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = emx_tso_pullup(tdata, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index; this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = tdata->next_avail_tx_desc;
	tx_buffer = &tdata->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	tdata->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

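	/*
	 * Added commentary: txd_upper/txd_lower now carry the
	 * per-packet flags (checksum/TSO context, VLAN) and are
	 * replicated into every data descriptor below, while EOP and
	 * the optional RS bit are OR'ed into the last descriptor only,
	 * after the loop.
	 */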
	i = tdata->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &tdata->tx_buf[i];
		ctxd = &tdata->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == tdata->num_tx_desc)
			i = 0;
	}

	tdata->next_avail_tx_desc = i;

	KKASSERT(tdata->num_tx_desc_avail > nsegs);
	tdata->num_tx_desc_avail -= nsegs;

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
		tdata->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_intr_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		tdata->tx_dd[tdata->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
		KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Defer TDT updating until enough descriptors are set up.
	 */
	*idx = i;

#ifdef EMX_TSS_DEBUG
	tdata->tx_pkts++;
#endif

	return (0);
}

static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	reg_rctl &= ~E1000_RCTL_UPE;
	reg_rctl &= ~E1000_RCTL_MPE;
	reg_rctl &= ~E1000_RCTL_SBP;
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

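/*
 * Added commentary: once more than EMX_MCAST_ADDR_MAX groups are
 * joined, emx_set_multi() above stops doing exact filtering and sets
 * RCTL.MPE instead, i.e. the chip accepts all multicast and the stack
 * filters in software.
 */
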
/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(&sc->main_serialize);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571EB/82572EI
		 */
		if (sc->link_speed != SPEED_1000 &&
		    (hw->mac.type == e1000_82571 ||
		     hw->mac.type == e1000_82572)) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));
			device_printf(dev, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    (sc->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex",
			    flowctrl);
		}
		if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
			e1000_force_flowctrl(hw, sc->ifm_flowctrl);
		sc->link_active = 1;
		sc->smartspeed = 0;
		ifp->if_baudrate = sc->link_speed * 1000000;
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		sc->link_active = 0;
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
emx_stop(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_clr_oactive(tdata->ifsq);
		ifsq_watchdog_stop(&tdata->tx_watchdog);
		tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;
	}

	/*
	 * Disable multiple receive queues.
	 *
	 * NOTE:
	 * We should disable multiple receive queues before
	 * resetting the hardware.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		emx_free_tx_ring(&sc->tx_data[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		emx_free_rx_ring(&sc->rx_data[i]);
}

static int
emx_reset(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	uint16_t rx_buffer_size;
	uint32_t pba;

	/* Set up smart power down as default off on newer adapters. */
	if (!emx_smart_pwr_down &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (sc->hw.mac.type) {
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;

	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;

	case e1000_82574:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;

	case e1000_pch_lpt:
	case e1000_pch_spt:
		pba = E1000_PBA_26K;
		break;

	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (sc->hw.mac.max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);

2014 * This allows the receiver to restart by sending XON when it has 2015 * drained a bit. Here we use an arbitrary value of 1500 which will 2016 * restart after one full frame is pulled from the buffer. There 2017 * could be several smaller frames in the buffer and if so they will 2018 * not trigger the XON until their total size reduces the buffer 2019 * by 1500 bytes. 2020 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 2021 */ 2022 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10; 2023 2024 sc->hw.fc.high_water = rx_buffer_size - 2025 roundup2(sc->hw.mac.max_frame_size, 1024); 2026 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500; 2027 2028 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME; 2029 sc->hw.fc.send_xon = TRUE; 2030 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl); 2031 2032 /* 2033 * Device specific overrides/settings 2034 */ 2035 if (sc->hw.mac.type == e1000_pch_lpt || 2036 sc->hw.mac.type == e1000_pch_spt) { 2037 sc->hw.fc.high_water = 0x5C20; 2038 sc->hw.fc.low_water = 0x5048; 2039 sc->hw.fc.pause_time = 0x0650; 2040 sc->hw.fc.refresh_time = 0x0400; 2041 /* Jumbos need adjusted PBA */ 2042 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) 2043 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12); 2044 else 2045 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26); 2046 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2047 sc->hw.fc.pause_time = 0xFFFF; 2048 } 2049 2050 /* Issue a global reset */ 2051 e1000_reset_hw(&sc->hw); 2052 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 2053 emx_disable_aspm(sc); 2054 2055 if (e1000_init_hw(&sc->hw) < 0) { 2056 device_printf(dev, "Hardware Initialization Failed\n"); 2057 return (EIO); 2058 } 2059 2060 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 2061 e1000_get_phy_info(&sc->hw); 2062 e1000_check_for_link(&sc->hw); 2063 2064 return (0); 2065 } 2066 2067 static void 2068 emx_setup_ifp(struct emx_softc *sc) 2069 { 2070 struct ifnet *ifp = &sc->arpcom.ac_if; 2071 int i; 2072 2073 if_initname(ifp, device_get_name(sc->dev), 2074 device_get_unit(sc->dev)); 2075 ifp->if_softc = sc; 2076 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2077 ifp->if_init = emx_init; 2078 ifp->if_ioctl = emx_ioctl; 2079 ifp->if_start = emx_start; 2080 #ifdef IFPOLL_ENABLE 2081 ifp->if_npoll = emx_npoll; 2082 #endif 2083 ifp->if_serialize = emx_serialize; 2084 ifp->if_deserialize = emx_deserialize; 2085 ifp->if_tryserialize = emx_tryserialize; 2086 #ifdef INVARIANTS 2087 ifp->if_serialize_assert = emx_serialize_assert; 2088 #endif 2089 2090 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc; 2091 2092 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1); 2093 ifq_set_ready(&ifp->if_snd); 2094 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 2095 2096 ifp->if_mapsubq = ifq_mapsubq_mask; 2097 ifq_set_subq_mask(&ifp->if_snd, 0); 2098 2099 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2100 2101 ifp->if_capabilities = IFCAP_HWCSUM | 2102 IFCAP_VLAN_HWTAGGING | 2103 IFCAP_VLAN_MTU | 2104 IFCAP_TSO; 2105 if (sc->rx_ring_cnt > 1) 2106 ifp->if_capabilities |= IFCAP_RSS; 2107 ifp->if_capenable = ifp->if_capabilities; 2108 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2109 2110 /* 2111 * Tell the upper layer(s) we support long frames.
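 * NOTE: sizeof(struct ether_vlan_header) is 18 bytes, i.e. the standard 14-byte Ethernet header plus a 4-byte 802.1Q tag, so the header length advertised below already budgets for VLAN-tagged frames.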
2112 */ 2113 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2114 2115 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2116 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2117 struct emx_txdata *tdata = &sc->tx_data[i]; 2118 2119 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2120 ifsq_set_priv(ifsq, tdata); 2121 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2122 tdata->ifsq = ifsq; 2123 2124 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2125 } 2126 2127 /* 2128 * Specify the media types supported by this adapter and register 2129 * callbacks to update media and link information 2130 */ 2131 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2132 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2133 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2134 0, NULL); 2135 } else { 2136 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2137 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2138 0, NULL); 2139 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2140 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2141 0, NULL); 2142 if (sc->hw.phy.type != e1000_phy_ife) { 2143 ifmedia_add(&sc->media, 2144 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2145 } 2146 } 2147 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2148 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl); 2149 } 2150 2151 /* 2152 * Workaround for SmartSpeed on 82541 and 82547 controllers 2153 */ 2154 static void 2155 emx_smartspeed(struct emx_softc *sc) 2156 { 2157 uint16_t phy_tmp; 2158 2159 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2160 sc->hw.mac.autoneg == 0 || 2161 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2162 return; 2163 2164 if (sc->smartspeed == 0) { 2165 /* 2166 * If the Master/Slave config fault is asserted twice in a row, 2167 * assume the fault is persistent and clear the manual setting 2168 */ 2169 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2170 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2171 return; 2172 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2173 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2174 e1000_read_phy_reg(&sc->hw, 2175 PHY_1000T_CTRL, &phy_tmp); 2176 if (phy_tmp & CR_1000T_MS_ENABLE) { 2177 phy_tmp &= ~CR_1000T_MS_ENABLE; 2178 e1000_write_phy_reg(&sc->hw, 2179 PHY_1000T_CTRL, phy_tmp); 2180 sc->smartspeed++; 2181 if (sc->hw.mac.autoneg && 2182 !e1000_phy_setup_autoneg(&sc->hw) && 2183 !e1000_read_phy_reg(&sc->hw, 2184 PHY_CONTROL, &phy_tmp)) { 2185 phy_tmp |= MII_CR_AUTO_NEG_EN | 2186 MII_CR_RESTART_AUTO_NEG; 2187 e1000_write_phy_reg(&sc->hw, 2188 PHY_CONTROL, phy_tmp); 2189 } 2190 } 2191 } 2192 return; 2193 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2194 /* If still no link, perhaps using 2/3 pair cable */ 2195 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2196 phy_tmp |= CR_1000T_MS_ENABLE; 2197 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2198 if (sc->hw.mac.autoneg && 2199 !e1000_phy_setup_autoneg(&sc->hw) && 2200 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2201 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2202 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2203 } 2204 } 2205 2206 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2207 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2208 sc->smartspeed = 0; 2209 } 2210 2211 static int 2212 emx_create_tx_ring(struct emx_txdata *tdata) 2213 { 2214 device_t dev = tdata->sc->dev; 2215 struct emx_txbuf *tx_buffer; 2216 int error, i, tsize, ntxd; 2217 2218 /* 2219 *
Validate the number of transmit descriptors. It must not exceed 2220 * the hardware maximum, and the ring size must be a multiple of EMX_DBA_ALIGN. 2221 */ 2222 ntxd = device_getenv_int(dev, "txd", emx_txd); 2223 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2224 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2225 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2226 EMX_DEFAULT_TXD, ntxd); 2227 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2228 } else { 2229 tdata->num_tx_desc = ntxd; 2230 } 2231 2232 /* 2233 * Allocate Transmit Descriptor ring 2234 */ 2235 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2236 EMX_DBA_ALIGN); 2237 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2238 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2239 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2240 &tdata->tx_desc_paddr); 2241 if (tdata->tx_desc_base == NULL) { 2242 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2243 return ENOMEM; 2244 } 2245 2246 tsize = __VM_CACHELINE_ALIGN( 2247 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2248 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO); 2249 2250 /* 2251 * Create DMA tags for tx buffers 2252 */ 2253 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2254 1, 0, /* alignment, bounds */ 2255 BUS_SPACE_MAXADDR, /* lowaddr */ 2256 BUS_SPACE_MAXADDR, /* highaddr */ 2257 NULL, NULL, /* filter, filterarg */ 2258 EMX_TSO_SIZE, /* maxsize */ 2259 EMX_MAX_SCATTER, /* nsegments */ 2260 EMX_MAX_SEGSIZE, /* maxsegsize */ 2261 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2262 BUS_DMA_ONEBPAGE, /* flags */ 2263 &tdata->txtag); 2264 if (error) { 2265 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2266 kfree(tdata->tx_buf, M_DEVBUF); 2267 tdata->tx_buf = NULL; 2268 return error; 2269 } 2270 2271 /* 2272 * Create DMA maps for tx buffers 2273 */ 2274 for (i = 0; i < tdata->num_tx_desc; i++) { 2275 tx_buffer = &tdata->tx_buf[i]; 2276 2277 error = bus_dmamap_create(tdata->txtag, 2278 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2279 &tx_buffer->map); 2280 if (error) { 2281 device_printf(dev, "Unable to create TX DMA map\n"); 2282 emx_destroy_tx_ring(tdata, i); 2283 return error; 2284 } 2285 } 2286 2287 /* 2288 * Setup TX parameters 2289 */ 2290 tdata->spare_tx_desc = EMX_TX_SPARE; 2291 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2292 2293 /* 2294 * Keep the following relationship between spare_tx_desc, oact_tx_desc 2295 * and tx_intr_nsegs: 2296 * (spare_tx_desc + EMX_TX_RESERVED) <= 2297 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2298 */ 2299 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2300 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2301 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2302 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2303 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2304 2305 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2306 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2307 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2308 2309 /* 2310 * Pull up an extra 4 bytes into the first data segment for TSO, see: 2311 * 82571/82572 specification update errata #7 2312 * 2313 * The same applies to I217 (and maybe I218 and I219). 2314 * 2315 * NOTE: 2316 * 4 bytes instead of the 2 bytes mentioned in the errata 2317 * are pulled; mainly to keep the rest of the data properly aligned.
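 * The EMX_TXFLAG_TSO_PULLEX flag set below is what makes the TSO transmit path perform this extra pullup.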
2318 */ 2319 if (tdata->sc->hw.mac.type == e1000_82571 || 2320 tdata->sc->hw.mac.type == e1000_82572 || 2321 tdata->sc->hw.mac.type == e1000_pch_lpt || 2322 tdata->sc->hw.mac.type == e1000_pch_spt) 2323 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2324 2325 return (0); 2326 } 2327 2328 static void 2329 emx_init_tx_ring(struct emx_txdata *tdata) 2330 { 2331 /* Clear the old ring contents */ 2332 bzero(tdata->tx_desc_base, 2333 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2334 2335 /* Reset state */ 2336 tdata->next_avail_tx_desc = 0; 2337 tdata->next_tx_to_clean = 0; 2338 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2339 2340 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2341 if (tdata->sc->tx_ring_inuse > 1) { 2342 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2343 if (bootverbose) { 2344 if_printf(&tdata->sc->arpcom.ac_if, 2345 "TX %d force ctx setup\n", tdata->idx); 2346 } 2347 } 2348 } 2349 2350 static void 2351 emx_init_tx_unit(struct emx_softc *sc) 2352 { 2353 uint32_t tctl, tarc, tipg = 0, txdctl; 2354 int i; 2355 2356 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2357 struct emx_txdata *tdata = &sc->tx_data[i]; 2358 uint64_t bus_addr; 2359 2360 /* Setup the Base and Length of the Tx Descriptor Ring */ 2361 bus_addr = tdata->tx_desc_paddr; 2362 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2363 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2364 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2365 (uint32_t)(bus_addr >> 32)); 2366 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2367 (uint32_t)bus_addr); 2368 /* Setup the HW Tx Head and Tail descriptor pointers */ 2369 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2370 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2371 } 2372 2373 /* Set the default values for the Tx Inter Packet Gap timer */ 2374 switch (sc->hw.mac.type) { 2375 case e1000_80003es2lan: 2376 tipg = DEFAULT_82543_TIPG_IPGR1; 2377 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2378 E1000_TIPG_IPGR2_SHIFT; 2379 break; 2380 2381 default: 2382 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2383 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2384 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2385 else 2386 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2387 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2388 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2389 break; 2390 } 2391 2392 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2393 2394 /* NOTE: 0 is not allowed for TIDV */ 2395 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2396 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2397 2398 /* 2399 * Errata workaround (obtained from Linux). This is necessary 2400 * to make multiple TX queues work on 82574. 2401 * XXX can't find it in any published errata though. 
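 * The workaround below simply mirrors TXDCTL(0) into TXDCTL(1), so both TX queues run with identical descriptor write-back settings.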
2402 */ 2403 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0)); 2404 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl); 2405 2406 if (sc->hw.mac.type == e1000_82571 || 2407 sc->hw.mac.type == e1000_82572) { 2408 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2409 tarc |= EMX_TARC_SPEED_MODE; 2410 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2411 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2412 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2413 tarc |= 1; 2414 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2415 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2416 tarc |= 1; 2417 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2418 } 2419 2420 /* Program the Transmit Control Register */ 2421 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2422 tctl &= ~E1000_TCTL_CT; 2423 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2424 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2425 tctl |= E1000_TCTL_MULR; 2426 2427 /* This write will effectively turn on the transmit unit. */ 2428 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2429 2430 if (sc->hw.mac.type == e1000_82571 || 2431 sc->hw.mac.type == e1000_82572 || 2432 sc->hw.mac.type == e1000_80003es2lan) { 2433 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2434 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2435 tarc &= ~(1 << 28); 2436 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2437 } 2438 2439 if (sc->tx_ring_inuse > 1) { 2440 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2441 tarc &= ~EMX_TARC_COUNT_MASK; 2442 tarc |= 1; 2443 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2444 2445 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2446 tarc &= ~EMX_TARC_COUNT_MASK; 2447 tarc |= 1; 2448 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2449 } 2450 } 2451 2452 static void 2453 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc) 2454 { 2455 struct emx_txbuf *tx_buffer; 2456 int i; 2457 2458 /* Free Transmit Descriptor ring */ 2459 if (tdata->tx_desc_base) { 2460 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap); 2461 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base, 2462 tdata->tx_desc_dmap); 2463 bus_dma_tag_destroy(tdata->tx_desc_dtag); 2464 2465 tdata->tx_desc_base = NULL; 2466 } 2467 2468 if (tdata->tx_buf == NULL) 2469 return; 2470 2471 for (i = 0; i < ndesc; i++) { 2472 tx_buffer = &tdata->tx_buf[i]; 2473 2474 KKASSERT(tx_buffer->m_head == NULL); 2475 bus_dmamap_destroy(tdata->txtag, tx_buffer->map); 2476 } 2477 bus_dma_tag_destroy(tdata->txtag); 2478 2479 kfree(tdata->tx_buf, M_DEVBUF); 2480 tdata->tx_buf = NULL; 2481 } 2482 2483 /* 2484 * The offload context needs to be set when we transfer the first 2485 * packet of a particular protocol (TCP/UDP). This routine has been 2486 * enhanced to deal with inserted VLAN headers. 2487 * 2488 * If the new packet's ether header length, ip header length and 2489 * csum offloading type are the same as the previous packet's, we 2490 * should avoid allocating a new csum context descriptor; mainly to take 2491 * advantage of the pipeline effect of the TX data read request. 2492 * 2493 * This function returns the number of TX descriptors allocated for 2494 * the csum context.
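 * A worked example, assuming an untagged IPv4/TCP frame (so ehdrlen = 14 and ip_hlen = 20): ipcss = 14, ipcse = 14 + 20 - 1 = 33, ipcso = 14 + offsetof(struct ip, ip_sum) = 24; for TCP, tucss = 34 and tucso = 34 + offsetof(struct tcphdr, th_sum) = 50.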
2495 */ 2496 static int 2497 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp, 2498 uint32_t *txd_upper, uint32_t *txd_lower) 2499 { 2500 struct e1000_context_desc *TXD; 2501 int curr_txd, ehdrlen, csum_flags; 2502 uint32_t cmd, hdr_len, ip_hlen; 2503 2504 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2505 ip_hlen = mp->m_pkthdr.csum_iphlen; 2506 ehdrlen = mp->m_pkthdr.csum_lhlen; 2507 2508 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 2509 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen && 2510 tdata->csum_flags == csum_flags) { 2511 /* 2512 * Same csum offload context as the previous packets; 2513 * just return. 2514 */ 2515 *txd_upper = tdata->csum_txd_upper; 2516 *txd_lower = tdata->csum_txd_lower; 2517 return 0; 2518 } 2519 2520 /* 2521 * Setup a new csum offload context. 2522 */ 2523 2524 curr_txd = tdata->next_avail_tx_desc; 2525 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 2526 2527 cmd = 0; 2528 2529 /* Setup of IP header checksum. */ 2530 if (csum_flags & CSUM_IP) { 2531 /* 2532 * Start offset for header checksum calculation. 2533 * End offset for header checksum calculation. 2534 * Offset of place to put the checksum. 2535 */ 2536 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2537 TXD->lower_setup.ip_fields.ipcse = 2538 htole16(ehdrlen + ip_hlen - 1); 2539 TXD->lower_setup.ip_fields.ipcso = 2540 ehdrlen + offsetof(struct ip, ip_sum); 2541 cmd |= E1000_TXD_CMD_IP; 2542 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2543 } 2544 hdr_len = ehdrlen + ip_hlen; 2545 2546 if (csum_flags & CSUM_TCP) { 2547 /* 2548 * Start offset for payload checksum calculation. 2549 * End offset for payload checksum calculation. 2550 * Offset of place to put the checksum. 2551 */ 2552 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2553 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2554 TXD->upper_setup.tcp_fields.tucso = 2555 hdr_len + offsetof(struct tcphdr, th_sum); 2556 cmd |= E1000_TXD_CMD_TCP; 2557 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2558 } else if (csum_flags & CSUM_UDP) { 2559 /* 2560 * Start offset for payload checksum calculation. 2561 * End offset for payload checksum calculation. 2562 * Offset of place to put the checksum.
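 * (for UDP the checksum lands at offsetof(struct udphdr, uh_sum) = 6 bytes into the UDP header, hence tucso = hdr_len + 6)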
2563 */ 2564 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2565 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2566 TXD->upper_setup.tcp_fields.tucso = 2567 hdr_len + offsetof(struct udphdr, uh_sum); 2568 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2569 } 2570 2571 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2572 E1000_TXD_DTYP_D; /* Data descr */ 2573 2574 /* Save the information for this csum offloading context */ 2575 tdata->csum_lhlen = ehdrlen; 2576 tdata->csum_iphlen = ip_hlen; 2577 tdata->csum_flags = csum_flags; 2578 tdata->csum_txd_upper = *txd_upper; 2579 tdata->csum_txd_lower = *txd_lower; 2580 2581 TXD->tcp_seg_setup.data = htole32(0); 2582 TXD->cmd_and_length = 2583 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2584 2585 if (++curr_txd == tdata->num_tx_desc) 2586 curr_txd = 0; 2587 2588 KKASSERT(tdata->num_tx_desc_avail > 0); 2589 tdata->num_tx_desc_avail--; 2590 2591 tdata->next_avail_tx_desc = curr_txd; 2592 return 1; 2593 } 2594 2595 static void 2596 emx_txeof(struct emx_txdata *tdata) 2597 { 2598 struct emx_txbuf *tx_buffer; 2599 int first, num_avail; 2600 2601 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2602 return; 2603 2604 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2605 return; 2606 2607 num_avail = tdata->num_tx_desc_avail; 2608 first = tdata->next_tx_to_clean; 2609 2610 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2611 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2612 struct e1000_tx_desc *tx_desc; 2613 2614 tx_desc = &tdata->tx_desc_base[dd_idx]; 2615 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2616 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2617 2618 if (++dd_idx == tdata->num_tx_desc) 2619 dd_idx = 0; 2620 2621 while (first != dd_idx) { 2622 logif(pkt_txclean); 2623 2624 num_avail++; 2625 2626 tx_buffer = &tdata->tx_buf[first]; 2627 if (tx_buffer->m_head) { 2628 bus_dmamap_unload(tdata->txtag, 2629 tx_buffer->map); 2630 m_freem(tx_buffer->m_head); 2631 tx_buffer->m_head = NULL; 2632 } 2633 2634 if (++first == tdata->num_tx_desc) 2635 first = 0; 2636 } 2637 } else { 2638 break; 2639 } 2640 } 2641 tdata->next_tx_to_clean = first; 2642 tdata->num_tx_desc_avail = num_avail; 2643 2644 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2645 tdata->tx_dd_head = 0; 2646 tdata->tx_dd_tail = 0; 2647 } 2648 2649 if (!EMX_IS_OACTIVE(tdata)) { 2650 ifsq_clr_oactive(tdata->ifsq); 2651 2652 /* All clean, turn off the timer */ 2653 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2654 tdata->tx_watchdog.wd_timer = 0; 2655 } 2656 } 2657 2658 static void 2659 emx_tx_collect(struct emx_txdata *tdata) 2660 { 2661 struct emx_txbuf *tx_buffer; 2662 int tdh, first, num_avail, dd_idx = -1; 2663 2664 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2665 return; 2666 2667 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx)); 2668 if (tdh == tdata->next_tx_to_clean) 2669 return; 2670 2671 if (tdata->tx_dd_head != tdata->tx_dd_tail) 2672 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2673 2674 num_avail = tdata->num_tx_desc_avail; 2675 first = tdata->next_tx_to_clean; 2676 2677 while (first != tdh) { 2678 logif(pkt_txclean); 2679 2680 num_avail++; 2681 2682 tx_buffer = &tdata->tx_buf[first]; 2683 if (tx_buffer->m_head) { 2684 bus_dmamap_unload(tdata->txtag, 2685 tx_buffer->map); 2686 m_freem(tx_buffer->m_head); 2687 tx_buffer->m_head = NULL; 2688 } 2689 2690 if (first == dd_idx) { 2691 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2692 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2693 tdata->tx_dd_head = 0; 2694 tdata->tx_dd_tail = 0; 2695 dd_idx = -1; 
} else { 2697 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2698 } 2699 } 2700 2701 if (++first == tdata->num_tx_desc) 2702 first = 0; 2703 } 2704 tdata->next_tx_to_clean = first; 2705 tdata->num_tx_desc_avail = num_avail; 2706 2707 if (!EMX_IS_OACTIVE(tdata)) { 2708 ifsq_clr_oactive(tdata->ifsq); 2709 2710 /* All clean, turn off the timer */ 2711 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2712 tdata->tx_watchdog.wd_timer = 0; 2713 } 2714 } 2715 2716 /* 2717 * When link is lost there is sometimes still work pending in the TX 2718 * ring, which would result in a watchdog reset; rather than allow that, 2719 * do an attempted cleanup and then reinit here. Note that this has 2720 * been seen mostly with fiber adapters. 2721 */ 2722 static void 2723 emx_tx_purge(struct emx_softc *sc) 2724 { 2725 int i; 2726 2727 if (sc->link_active) 2728 return; 2729 2730 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2731 struct emx_txdata *tdata = &sc->tx_data[i]; 2732 2733 if (tdata->tx_watchdog.wd_timer) { 2734 emx_tx_collect(tdata); 2735 if (tdata->tx_watchdog.wd_timer) { 2736 if_printf(&sc->arpcom.ac_if, 2737 "Link lost, TX pending, reinit\n"); 2738 emx_init(sc); 2739 return; 2740 } 2741 } 2742 } 2743 } 2744 2745 static int 2746 emx_newbuf(struct emx_rxdata *rdata, int i, int init) 2747 { 2748 struct mbuf *m; 2749 bus_dma_segment_t seg; 2750 bus_dmamap_t map; 2751 struct emx_rxbuf *rx_buffer; 2752 int error, nseg; 2753 2754 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2755 if (m == NULL) { 2756 if (init) { 2757 if_printf(&rdata->sc->arpcom.ac_if, 2758 "Unable to allocate RX mbuf\n"); 2759 } 2760 return (ENOBUFS); 2761 } 2762 m->m_len = m->m_pkthdr.len = MCLBYTES; 2763 2764 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 2765 m_adj(m, ETHER_ALIGN); 2766 2767 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2768 rdata->rx_sparemap, m, 2769 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2770 if (error) { 2771 m_freem(m); 2772 if (init) { 2773 if_printf(&rdata->sc->arpcom.ac_if, 2774 "Unable to load RX mbuf\n"); 2775 } 2776 return (error); 2777 } 2778 2779 rx_buffer = &rdata->rx_buf[i]; 2780 if (rx_buffer->m_head != NULL) 2781 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2782 2783 map = rx_buffer->map; 2784 rx_buffer->map = rdata->rx_sparemap; 2785 rdata->rx_sparemap = map; 2786 2787 rx_buffer->m_head = m; 2788 rx_buffer->paddr = seg.ds_addr; 2789 2790 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2791 return (0); 2792 } 2793 2794 static int 2795 emx_create_rx_ring(struct emx_rxdata *rdata) 2796 { 2797 device_t dev = rdata->sc->dev; 2798 struct emx_rxbuf *rx_buffer; 2799 int i, error, rsize, nrxd; 2800 2801 /* 2802 * Validate the number of receive descriptors. It must not exceed 2803 * the hardware maximum, and the ring size must be a multiple of EMX_DBA_ALIGN.
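 * E.g. assuming EMX_DBA_ALIGN is 128 and the 16-byte extended RX descriptor, any descriptor count that is a multiple of 8 passes the alignment check.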
2804 */ 2805 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2806 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2807 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2808 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2809 EMX_DEFAULT_RXD, nrxd); 2810 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2811 } else { 2812 rdata->num_rx_desc = nrxd; 2813 } 2814 2815 /* 2816 * Allocate Receive Descriptor ring 2817 */ 2818 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2819 EMX_DBA_ALIGN); 2820 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2821 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2822 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2823 &rdata->rx_desc_paddr); 2824 if (rdata->rx_desc == NULL) { 2825 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2826 return ENOMEM; 2827 } 2828 2829 rsize = __VM_CACHELINE_ALIGN( 2830 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2831 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2832 2833 /* 2834 * Create DMA tag for rx buffers 2835 */ 2836 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2837 1, 0, /* alignment, bounds */ 2838 BUS_SPACE_MAXADDR, /* lowaddr */ 2839 BUS_SPACE_MAXADDR, /* highaddr */ 2840 NULL, NULL, /* filter, filterarg */ 2841 MCLBYTES, /* maxsize */ 2842 1, /* nsegments */ 2843 MCLBYTES, /* maxsegsize */ 2844 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2845 &rdata->rxtag); 2846 if (error) { 2847 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2848 kfree(rdata->rx_buf, M_DEVBUF); 2849 rdata->rx_buf = NULL; 2850 return error; 2851 } 2852 2853 /* 2854 * Create spare DMA map for rx buffers 2855 */ 2856 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2857 &rdata->rx_sparemap); 2858 if (error) { 2859 device_printf(dev, "Unable to create spare RX DMA map\n"); 2860 bus_dma_tag_destroy(rdata->rxtag); 2861 kfree(rdata->rx_buf, M_DEVBUF); 2862 rdata->rx_buf = NULL; 2863 return error; 2864 } 2865 2866 /* 2867 * Create DMA maps for rx buffers 2868 */ 2869 for (i = 0; i < rdata->num_rx_desc; i++) { 2870 rx_buffer = &rdata->rx_buf[i]; 2871 2872 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2873 &rx_buffer->map); 2874 if (error) { 2875 device_printf(dev, "Unable to create RX DMA map\n"); 2876 emx_destroy_rx_ring(rdata, i); 2877 return error; 2878 } 2879 } 2880 return (0); 2881 } 2882 2883 static void 2884 emx_free_rx_ring(struct emx_rxdata *rdata) 2885 { 2886 int i; 2887 2888 for (i = 0; i < rdata->num_rx_desc; i++) { 2889 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 2890 2891 if (rx_buffer->m_head != NULL) { 2892 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2893 m_freem(rx_buffer->m_head); 2894 rx_buffer->m_head = NULL; 2895 } 2896 } 2897 2898 if (rdata->fmp != NULL) 2899 m_freem(rdata->fmp); 2900 rdata->fmp = NULL; 2901 rdata->lmp = NULL; 2902 } 2903 2904 static void 2905 emx_free_tx_ring(struct emx_txdata *tdata) 2906 { 2907 int i; 2908 2909 for (i = 0; i < tdata->num_tx_desc; i++) { 2910 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i]; 2911 2912 if (tx_buffer->m_head != NULL) { 2913 bus_dmamap_unload(tdata->txtag, tx_buffer->map); 2914 m_freem(tx_buffer->m_head); 2915 tx_buffer->m_head = NULL; 2916 } 2917 } 2918 2919 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX; 2920 2921 tdata->csum_flags = 0; 2922 tdata->csum_lhlen = 0; 2923 tdata->csum_iphlen = 0; 2924 tdata->csum_thlen = 0; 2925 tdata->csum_mss = 0; 2926 tdata->csum_pktlen = 0; 2927 2928 tdata->tx_dd_head = 0; 2929 tdata->tx_dd_tail = 0; 2930 tdata->tx_nsegs = 0; 2931 } 2932 2933 
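/* Populate the RX ring: allocate an mbuf cluster for each descriptor slot and reset the ring scan index. Any allocation failure is propagated to the caller. */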
static int 2934 emx_init_rx_ring(struct emx_rxdata *rdata) 2935 { 2936 int i, error; 2937 2938 /* Reset descriptor ring */ 2939 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 2940 2941 /* Allocate new ones. */ 2942 for (i = 0; i < rdata->num_rx_desc; i++) { 2943 error = emx_newbuf(rdata, i, 1); 2944 if (error) 2945 return (error); 2946 } 2947 2948 /* Setup our descriptor pointers */ 2949 rdata->next_rx_desc_to_check = 0; 2950 2951 return (0); 2952 } 2953 2954 static void 2955 emx_init_rx_unit(struct emx_softc *sc) 2956 { 2957 struct ifnet *ifp = &sc->arpcom.ac_if; 2958 uint64_t bus_addr; 2959 uint32_t rctl, itr, rfctl; 2960 int i; 2961 2962 /* 2963 * Make sure receives are disabled while setting 2964 * up the descriptor ring 2965 */ 2966 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 2967 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2968 2969 /* 2970 * Set the interrupt throttling rate. Value is calculated 2971 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 2972 */ 2973 if (sc->int_throttle_ceil) 2974 itr = 1000000000 / 256 / sc->int_throttle_ceil; 2975 else 2976 itr = 0; 2977 emx_set_itr(sc, itr); 2978 2979 /* Use extended RX descriptor */ 2980 rfctl = E1000_RFCTL_EXTEN; 2981 2982 /* Disable accelerated acknowledgement */ 2983 if (sc->hw.mac.type == e1000_82574) 2984 rfctl |= E1000_RFCTL_ACK_DIS; 2985 2986 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 2987 2988 /* 2989 * Receive Checksum Offload for TCP and UDP 2990 * 2991 * Checksum offloading is also enabled if multiple receive 2992 * queues are to be supported, since we need it to figure out 2993 * the packet type. 2994 */ 2995 if ((ifp->if_capenable & IFCAP_RXCSUM) || 2996 sc->rx_ring_cnt > 1) { 2997 uint32_t rxcsum; 2998 2999 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 3000 3001 /* 3002 * NOTE: 3003 * PCSD must be enabled to enable multiple 3004 * receive queues. 3005 */ 3006 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 3007 E1000_RXCSUM_PCSD; 3008 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 3009 } 3010 3011 /* 3012 * Configure multiple receive queues (RSS) 3013 */ 3014 if (sc->rx_ring_cnt > 1) { 3015 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE]; 3016 uint32_t reta; 3017 3018 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING, 3019 ("invalid number of RX ring (%d)", sc->rx_ring_cnt)); 3020 3021 /* 3022 * NOTE: 3023 * When we reach here, RSS has already been disabled 3024 * in emx_stop(), so we can safely configure the RSS 3025 * key and redirect table. 3026 */ 3027 3028 /* 3029 * Configure the RSS key 3030 */ 3031 toeplitz_get_key(key, sizeof(key)); 3032 for (i = 0; i < EMX_NRSSRK; ++i) { 3033 uint32_t rssrk; 3034 3035 rssrk = EMX_RSSRK_VAL(key, i); 3036 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 3037 3038 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk); 3039 } 3040 3041 /* 3042 * Configure the RSS redirect table in the following fashion: 3043 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 3044 */ 3045 reta = 0; 3046 for (i = 0; i < EMX_RETA_SIZE; ++i) { 3047 uint32_t q; 3048 3049 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT; 3050 reta |= q << (8 * i); 3051 } 3052 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 3053 3054 for (i = 0; i < EMX_NRETA; ++i) 3055 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta); 3056 3057 /* 3058 * Enable multiple receive queues. 3059 * Enable IPv4 RSS standard hash functions. 3060 * Disable RSS interrupt.
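 * With the two rings driven here (E1000_MRQC_ENABLE_RSS_2Q), the redirect table built above alternates between ring 0 and ring 1 from one byte-wide entry to the next, spreading the hash space evenly across both queues.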
3061 */ 3062 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 3063 E1000_MRQC_ENABLE_RSS_2Q | 3064 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3065 E1000_MRQC_RSS_FIELD_IPV4); 3066 } 3067 3068 /* 3069 * XXX TEMPORARY WORKAROUND: on some systems with 82573 3070 * long latencies are observed, like Lenovo X60. This 3071 * change eliminates the problem, but since having positive 3072 * values in RDTR is a known source of problems on other 3073 * platforms another solution is being sought. 3074 */ 3075 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 3076 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 3077 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 3078 } 3079 3080 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3081 struct emx_rxdata *rdata = &sc->rx_data[i]; 3082 3083 /* 3084 * Setup the Base and Length of the Rx Descriptor Ring 3085 */ 3086 bus_addr = rdata->rx_desc_paddr; 3087 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 3088 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 3089 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 3090 (uint32_t)(bus_addr >> 32)); 3091 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 3092 (uint32_t)bus_addr); 3093 3094 /* 3095 * Setup the HW Rx Head and Tail Descriptor Pointers 3096 */ 3097 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 3098 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 3099 sc->rx_data[i].num_rx_desc - 1); 3100 } 3101 3102 if (sc->hw.mac.type >= e1000_pch2lan) { 3103 if (ifp->if_mtu > ETHERMTU) 3104 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE); 3105 else 3106 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE); 3107 } 3108 3109 /* Setup the Receive Control Register */ 3110 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3111 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3112 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 3113 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3114 3115 /* Make sure VLAN Filters are off */ 3116 rctl &= ~E1000_RCTL_VFE; 3117 3118 /* Don't store bad packets */ 3119 rctl &= ~E1000_RCTL_SBP; 3120 3121 /* MCLBYTES */ 3122 rctl |= E1000_RCTL_SZ_2048; 3123 3124 if (ifp->if_mtu > ETHERMTU) 3125 rctl |= E1000_RCTL_LPE; 3126 else 3127 rctl &= ~E1000_RCTL_LPE; 3128 3129 /* Enable Receives */ 3130 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 3131 } 3132 3133 static void 3134 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc) 3135 { 3136 struct emx_rxbuf *rx_buffer; 3137 int i; 3138 3139 /* Free Receive Descriptor ring */ 3140 if (rdata->rx_desc) { 3141 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 3142 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 3143 rdata->rx_desc_dmap); 3144 bus_dma_tag_destroy(rdata->rx_desc_dtag); 3145 3146 rdata->rx_desc = NULL; 3147 } 3148 3149 if (rdata->rx_buf == NULL) 3150 return; 3151 3152 for (i = 0; i < ndesc; i++) { 3153 rx_buffer = &rdata->rx_buf[i]; 3154 3155 KKASSERT(rx_buffer->m_head == NULL); 3156 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 3157 } 3158 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 3159 bus_dma_tag_destroy(rdata->rxtag); 3160 3161 kfree(rdata->rx_buf, M_DEVBUF); 3162 rdata->rx_buf = NULL; 3163 } 3164 3165 static void 3166 emx_rxeof(struct emx_rxdata *rdata, int count) 3167 { 3168 struct ifnet *ifp = &rdata->sc->arpcom.ac_if; 3169 uint32_t staterr; 3170 emx_rxdesc_t *current_desc; 3171 struct mbuf *mp; 3172 int i, cpuid = mycpuid; 3173 3174 i = rdata->next_rx_desc_to_check; 3175 current_desc = &rdata->rx_desc[i]; 3176 staterr = le32toh(current_desc->rxd_staterr); 3177 3178 if (!(staterr & E1000_RXD_STAT_DD)) 3179 return; 3180 3181 while ((staterr & E1000_RXD_STAT_DD)
&& count != 0) { 3182 struct pktinfo *pi = NULL, pi0; 3183 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 3184 struct mbuf *m = NULL; 3185 int eop, len; 3186 3187 logif(pkt_receive); 3188 3189 mp = rx_buf->m_head; 3190 3191 /* 3192 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3193 * needs to access the last received byte in the mbuf. 3194 */ 3195 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 3196 BUS_DMASYNC_POSTREAD); 3197 3198 len = le16toh(current_desc->rxd_length); 3199 if (staterr & E1000_RXD_STAT_EOP) { 3200 count--; 3201 eop = 1; 3202 } else { 3203 eop = 0; 3204 } 3205 3206 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3207 uint16_t vlan = 0; 3208 uint32_t mrq, rss_hash; 3209 3210 /* 3211 * Save the necessary information 3212 * before emx_newbuf() overwrites it. 3213 */ 3214 if ((staterr & E1000_RXD_STAT_VP) && eop) 3215 vlan = le16toh(current_desc->rxd_vlan); 3216 3217 mrq = le32toh(current_desc->rxd_mrq); 3218 rss_hash = le32toh(current_desc->rxd_rss); 3219 3220 EMX_RSS_DPRINTF(rdata->sc, 10, 3221 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 3222 rdata->idx, mrq, rss_hash); 3223 3224 if (emx_newbuf(rdata, i, 0) != 0) { 3225 IFNET_STAT_INC(ifp, iqdrops, 1); 3226 goto discard; 3227 } 3228 3229 /* Assign correct length to the current fragment */ 3230 mp->m_len = len; 3231 3232 if (rdata->fmp == NULL) { 3233 mp->m_pkthdr.len = len; 3234 rdata->fmp = mp; /* Store the first mbuf */ 3235 rdata->lmp = mp; 3236 } else { 3237 /* 3238 * Chain the mbufs together 3239 */ 3240 rdata->lmp->m_next = mp; 3241 rdata->lmp = rdata->lmp->m_next; 3242 rdata->fmp->m_pkthdr.len += len; 3243 } 3244 3245 if (eop) { 3246 rdata->fmp->m_pkthdr.rcvif = ifp; 3247 IFNET_STAT_INC(ifp, ipackets, 1); 3248 3249 if (ifp->if_capenable & IFCAP_RXCSUM) 3250 emx_rxcsum(staterr, rdata->fmp); 3251 3252 if (staterr & E1000_RXD_STAT_VP) { 3253 rdata->fmp->m_pkthdr.ether_vlantag = 3254 vlan; 3255 rdata->fmp->m_flags |= M_VLANTAG; 3256 } 3257 m = rdata->fmp; 3258 rdata->fmp = NULL; 3259 rdata->lmp = NULL; 3260 3261 if (ifp->if_capenable & IFCAP_RSS) { 3262 pi = emx_rssinfo(m, &pi0, mrq, 3263 rss_hash, staterr); 3264 } 3265 #ifdef EMX_RSS_DEBUG 3266 rdata->rx_pkts++; 3267 #endif 3268 } 3269 } else { 3270 IFNET_STAT_INC(ifp, ierrors, 1); 3271 discard: 3272 emx_setup_rxdesc(current_desc, rx_buf); 3273 if (rdata->fmp != NULL) { 3274 m_freem(rdata->fmp); 3275 rdata->fmp = NULL; 3276 rdata->lmp = NULL; 3277 } 3278 m = NULL; 3279 } 3280 3281 if (m != NULL) 3282 ifp->if_input(ifp, m, pi, cpuid); 3283 3284 /* Advance our pointers to the next descriptor. */ 3285 if (++i == rdata->num_rx_desc) 3286 i = 0; 3287 3288 current_desc = &rdata->rx_desc[i]; 3289 staterr = le32toh(current_desc->rxd_staterr); 3290 } 3291 rdata->next_rx_desc_to_check = i; 3292 3293 /* Advance the E1000's Receive Queue "Tail Pointer".
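 * RDT is left pointing at the last descriptor software has refreshed, one behind next_rx_desc_to_check, so the hardware never overwrites a slot the driver has not reclaimed yet.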
*/ 3294 if (--i < 0) 3295 i = rdata->num_rx_desc - 1; 3296 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3297 } 3298 3299 static void 3300 emx_enable_intr(struct emx_softc *sc) 3301 { 3302 uint32_t ims_mask = IMS_ENABLE_MASK; 3303 3304 lwkt_serialize_handler_enable(&sc->main_serialize); 3305 3306 #if 0 3307 if (sc->hw.mac.type == e1000_82574) { 3308 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3309 ims_mask |= EM_MSIX_MASK; 3310 } 3311 #endif 3312 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3313 } 3314 3315 static void 3316 emx_disable_intr(struct emx_softc *sc) 3317 { 3318 if (sc->hw.mac.type == e1000_82574) 3319 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3320 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3321 3322 lwkt_serialize_handler_disable(&sc->main_serialize); 3323 } 3324 3325 /* 3326 * A bit of a misnomer: what this really means is 3327 * to enable OS management of the adapter, i.e. 3328 * to disable special hardware management features 3329 */ 3330 static void 3331 emx_get_mgmt(struct emx_softc *sc) 3332 { 3333 /* A shared code workaround */ 3334 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3335 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3336 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3337 3338 /* disable hardware interception of ARP */ 3339 manc &= ~(E1000_MANC_ARP_EN); 3340 3341 /* enable receiving management packets to the host */ 3342 manc |= E1000_MANC_EN_MNG2HOST; 3343 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3344 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3345 manc2h |= E1000_MNG2HOST_PORT_623; 3346 manc2h |= E1000_MNG2HOST_PORT_664; 3347 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3348 3349 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3350 } 3351 } 3352 3353 /* 3354 * Give control back to the hardware management 3355 * controller if there is one. 3356 */ 3357 static void 3358 emx_rel_mgmt(struct emx_softc *sc) 3359 { 3360 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3361 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3362 3363 /* re-enable hardware interception of ARP */ 3364 manc |= E1000_MANC_ARP_EN; 3365 manc &= ~E1000_MANC_EN_MNG2HOST; 3366 3367 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3368 } 3369 } 3370 3371 /* 3372 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3373 * For ASF and Pass Through versions of f/w this means that 3374 * the driver is loaded. For AMT version (only with 82573) 3375 * of the f/w this means that the network i/f is open. 3376 */ 3377 static void 3378 emx_get_hw_control(struct emx_softc *sc) 3379 { 3380 /* Let firmware know the driver has taken over */ 3381 if (sc->hw.mac.type == e1000_82573) { 3382 uint32_t swsm; 3383 3384 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3385 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3386 swsm | E1000_SWSM_DRV_LOAD); 3387 } else { 3388 uint32_t ctrl_ext; 3389 3390 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3391 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3392 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3393 } 3394 sc->flags |= EMX_FLAG_HW_CTRL; 3395 } 3396 3397 /* 3398 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3399 * For ASF and Pass Through versions of f/w this means that the 3400 * driver is no longer loaded. For AMT version (only with 82573) 3401 * of the f/w this means that the network i/f is closed.
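 * This is the inverse of emx_get_hw_control() and is a no-op unless EMX_FLAG_HW_CTRL was set by a prior call.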
3402 */ 3403 static void 3404 emx_rel_hw_control(struct emx_softc *sc) 3405 { 3406 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0) 3407 return; 3408 sc->flags &= ~EMX_FLAG_HW_CTRL; 3409 3410 /* Let the firmware take over control of the h/w */ 3411 if (sc->hw.mac.type == e1000_82573) { 3412 uint32_t swsm; 3413 3414 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3415 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3416 swsm & ~E1000_SWSM_DRV_LOAD); 3417 } else { 3418 uint32_t ctrl_ext; 3419 3420 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3421 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3422 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3423 } 3424 } 3425 3426 static int 3427 emx_is_valid_eaddr(const uint8_t *addr) 3428 { 3429 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3430 3431 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3432 return (FALSE); 3433 3434 return (TRUE); 3435 } 3436 3437 /* 3438 * Enable the PCI Wake On LAN capability 3439 */ 3440 void 3441 emx_enable_wol(device_t dev) 3442 { 3443 uint16_t cap, status; 3444 uint8_t id; 3445 3446 /* First find the capabilities pointer */ 3447 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3448 3449 /* Read the PM Capabilities */ 3450 id = pci_read_config(dev, cap, 1); 3451 if (id != PCIY_PMG) /* Something wrong */ 3452 return; 3453 3454 /* 3455 * OK, we have the power capabilities, 3456 * so now get the status register 3457 */ 3458 cap += PCIR_POWER_STATUS; 3459 status = pci_read_config(dev, cap, 2); 3460 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3461 pci_write_config(dev, cap, status, 2); 3462 } 3463 3464 static void 3465 emx_update_stats(struct emx_softc *sc) 3466 { 3467 struct ifnet *ifp = &sc->arpcom.ac_if; 3468 3469 if (sc->hw.phy.media_type == e1000_media_type_copper || 3470 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3471 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3472 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3473 } 3474 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3475 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3476 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3477 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3478 3479 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3480 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3481 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3482 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3483 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3484 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3485 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3486 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3487 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3488 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3489 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3490 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3491 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3492 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3493 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3494 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3495 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3496 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3497 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3498 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3499 3500 /* For the 64-bit byte counters the low dword must be read first.
*/ 3501 /* Both registers clear on the read of the high dword */ 3502 3503 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3504 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3505 3506 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3507 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3508 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3509 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3510 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3511 3512 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3513 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3514 3515 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3516 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3517 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3518 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3519 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3520 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3521 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3522 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3523 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3524 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3525 3526 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3527 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3528 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3529 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3530 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3531 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3532 3533 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3534 3535 /* Rx Errors */ 3536 IFNET_STAT_SET(ifp, ierrors, 3537 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3538 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3539 3540 /* Tx Errors */ 3541 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3542 } 3543 3544 static void 3545 emx_print_debug_info(struct emx_softc *sc) 3546 { 3547 device_t dev = sc->dev; 3548 uint8_t *hw_addr = sc->hw.hw_addr; 3549 int i; 3550 3551 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3552 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3553 E1000_READ_REG(&sc->hw, E1000_CTRL), 3554 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3555 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3556 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16), 3557 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff)); 3558 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3559 sc->hw.fc.high_water, sc->hw.fc.low_water); 3560 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3561 E1000_READ_REG(&sc->hw, E1000_TIDV), 3562 E1000_READ_REG(&sc->hw, E1000_TADV)); 3563 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3564 E1000_READ_REG(&sc->hw, E1000_RDTR), 3565 E1000_READ_REG(&sc->hw, E1000_RADV)); 3566 3567 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3568 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3569 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3570 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3571 } 3572 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3573 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3574 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3575 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3576 } 3577 3578 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3579 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3580 sc->tx_data[i].num_tx_desc_avail); 3581
device_printf(dev, "TX %d TSO segments = %lu\n", i, 3582 sc->tx_data[i].tso_segments); 3583 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3584 sc->tx_data[i].tso_ctx_reused); 3585 } 3586 } 3587 3588 static void 3589 emx_print_hw_stats(struct emx_softc *sc) 3590 { 3591 device_t dev = sc->dev; 3592 3593 device_printf(dev, "Excessive collisions = %lld\n", 3594 (long long)sc->stats.ecol); 3595 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3596 device_printf(dev, "Symbol errors = %lld\n", 3597 (long long)sc->stats.symerrs); 3598 #endif 3599 device_printf(dev, "Sequence errors = %lld\n", 3600 (long long)sc->stats.sec); 3601 device_printf(dev, "Defer count = %lld\n", 3602 (long long)sc->stats.dc); 3603 device_printf(dev, "Missed Packets = %lld\n", 3604 (long long)sc->stats.mpc); 3605 device_printf(dev, "Receive No Buffers = %lld\n", 3606 (long long)sc->stats.rnbc); 3607 /* RLEC is inaccurate on some hardware, calculate our own. */ 3608 device_printf(dev, "Receive Length Errors = %lld\n", 3609 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3610 device_printf(dev, "Receive errors = %lld\n", 3611 (long long)sc->stats.rxerrc); 3612 device_printf(dev, "Crc errors = %lld\n", 3613 (long long)sc->stats.crcerrs); 3614 device_printf(dev, "Alignment errors = %lld\n", 3615 (long long)sc->stats.algnerrc); 3616 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3617 (long long)sc->stats.cexterr); 3618 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3619 device_printf(dev, "XON Rcvd = %lld\n", 3620 (long long)sc->stats.xonrxc); 3621 device_printf(dev, "XON Xmtd = %lld\n", 3622 (long long)sc->stats.xontxc); 3623 device_printf(dev, "XOFF Rcvd = %lld\n", 3624 (long long)sc->stats.xoffrxc); 3625 device_printf(dev, "XOFF Xmtd = %lld\n", 3626 (long long)sc->stats.xofftxc); 3627 device_printf(dev, "Good Packets Rcvd = %lld\n", 3628 (long long)sc->stats.gprc); 3629 device_printf(dev, "Good Packets Xmtd = %lld\n", 3630 (long long)sc->stats.gptc); 3631 } 3632 3633 static void 3634 emx_print_nvm_info(struct emx_softc *sc) 3635 { 3636 uint16_t eeprom_data; 3637 int i, j, row = 0; 3638 3639 /* Its a bit crude, but it gets the job done */ 3640 kprintf("\nInterface EEPROM Dump:\n"); 3641 kprintf("Offset\n0x0000 "); 3642 for (i = 0, j = 0; i < 32; i++, j++) { 3643 if (j == 8) { /* Make the offset block */ 3644 j = 0; ++row; 3645 kprintf("\n0x00%x0 ",row); 3646 } 3647 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3648 kprintf("%04x ", eeprom_data); 3649 } 3650 kprintf("\n"); 3651 } 3652 3653 static int 3654 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3655 { 3656 struct emx_softc *sc; 3657 struct ifnet *ifp; 3658 int error, result; 3659 3660 result = -1; 3661 error = sysctl_handle_int(oidp, &result, 0, req); 3662 if (error || !req->newptr) 3663 return (error); 3664 3665 sc = (struct emx_softc *)arg1; 3666 ifp = &sc->arpcom.ac_if; 3667 3668 ifnet_serialize_all(ifp); 3669 3670 if (result == 1) 3671 emx_print_debug_info(sc); 3672 3673 /* 3674 * This value will cause a hex dump of the 3675 * first 32 16-bit words of the EEPROM to 3676 * the screen. 
3677 */ 3678 if (result == 2) 3679 emx_print_nvm_info(sc); 3680 3681 ifnet_deserialize_all(ifp); 3682 3683 return (error); 3684 } 3685 3686 static int 3687 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3688 { 3689 int error, result; 3690 3691 result = -1; 3692 error = sysctl_handle_int(oidp, &result, 0, req); 3693 if (error || !req->newptr) 3694 return (error); 3695 3696 if (result == 1) { 3697 struct emx_softc *sc = (struct emx_softc *)arg1; 3698 struct ifnet *ifp = &sc->arpcom.ac_if; 3699 3700 ifnet_serialize_all(ifp); 3701 emx_print_hw_stats(sc); 3702 ifnet_deserialize_all(ifp); 3703 } 3704 return (error); 3705 } 3706 3707 static void 3708 emx_add_sysctl(struct emx_softc *sc) 3709 { 3710 struct sysctl_ctx_list *ctx; 3711 struct sysctl_oid *tree; 3712 #if defined(EMX_RSS_DEBUG) || defined(EMX_TSS_DEBUG) 3713 char pkt_desc[32]; 3714 int i; 3715 #endif 3716 3717 ctx = device_get_sysctl_ctx(sc->dev); 3718 tree = device_get_sysctl_tree(sc->dev); 3719 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3720 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3721 emx_sysctl_debug_info, "I", "Debug Information"); 3722 3723 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3724 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3725 emx_sysctl_stats, "I", "Statistics"); 3726 3727 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3728 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0, 3729 "# of RX descs"); 3730 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3731 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0, 3732 "# of TX descs"); 3733 3734 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3735 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3736 emx_sysctl_int_throttle, "I", "interrupt throttling rate"); 3737 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3738 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3739 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt"); 3740 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3741 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3742 emx_sysctl_tx_wreg_nsegs, "I", 3743 "# segments sent before write to hardware register"); 3744 3745 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3746 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0, 3747 "# of RX rings"); 3748 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3749 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0, 3750 "# of TX rings"); 3751 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3752 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0, 3753 "# of TX rings used"); 3754 3755 #ifdef IFPOLL_ENABLE 3756 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3757 OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, 3758 sc, 0, emx_sysctl_npoll_rxoff, "I", 3759 "NPOLLING RX cpu offset"); 3760 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3761 OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, 3762 sc, 0, emx_sysctl_npoll_txoff, "I", 3763 "NPOLLING TX cpu offset"); 3764 #endif 3765 3766 #ifdef EMX_RSS_DEBUG 3767 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3768 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3769 0, "RSS debug level"); 3770 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3771 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i); 3772 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3773 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts, 3774 "RXed packets"); 3775 } 3776 #endif 3777 #ifdef EMX_TSS_DEBUG 3778 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3779 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i); 3780 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3781 pkt_desc, 
CTLFLAG_RW, &sc->tx_data[i].tx_pkts, 3782 "TXed packets"); 3783 } 3784 #endif 3785 } 3786 3787 static int 3788 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3789 { 3790 struct emx_softc *sc = (void *)arg1; 3791 struct ifnet *ifp = &sc->arpcom.ac_if; 3792 int error, throttle; 3793 3794 throttle = sc->int_throttle_ceil; 3795 error = sysctl_handle_int(oidp, &throttle, 0, req); 3796 if (error || req->newptr == NULL) 3797 return error; 3798 if (throttle < 0 || throttle > 1000000000 / 256) 3799 return EINVAL; 3800 3801 if (throttle) { 3802 /* 3803 * Convert the rate into 256ns increments; the ceiling is 3804 * recalculated below so the sysctl reports the exact frequency. 3805 */ 3806 throttle = 1000000000 / 256 / throttle; 3807 3808 /* The upper 16 bits of ITR are reserved and must be zero */ 3809 if (throttle & 0xffff0000) 3810 return EINVAL; 3811 } 3812 3813 ifnet_serialize_all(ifp); 3814 3815 if (throttle) 3816 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3817 else 3818 sc->int_throttle_ceil = 0; 3819 3820 if (ifp->if_flags & IFF_RUNNING) 3821 emx_set_itr(sc, throttle); 3822 3823 ifnet_deserialize_all(ifp); 3824 3825 if (bootverbose) { 3826 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3827 sc->int_throttle_ceil); 3828 } 3829 return 0; 3830 } 3831 3832 static int 3833 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3834 { 3835 struct emx_softc *sc = (void *)arg1; 3836 struct ifnet *ifp = &sc->arpcom.ac_if; 3837 struct emx_txdata *tdata = &sc->tx_data[0]; 3838 int error, segs; 3839 3840 segs = tdata->tx_intr_nsegs; 3841 error = sysctl_handle_int(oidp, &segs, 0, req); 3842 if (error || req->newptr == NULL) 3843 return error; 3844 if (segs <= 0) 3845 return EINVAL; 3846 3847 ifnet_serialize_all(ifp); 3848 3849 /* 3850 * Don't allow tx_intr_nsegs to become: 3851 * o Less than oact_tx_desc 3852 * o So large that no TX desc would cause a TX interrupt to 3853 * be generated (OACTIVE would never recover) 3854 * o So small that it would cause tx_dd[] to overflow 3855 */ 3856 if (segs < tdata->oact_tx_desc || 3857 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 3858 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 3859 error = EINVAL; 3860 } else { 3861 int i; 3862 3863 error = 0; 3864 for (i = 0; i < sc->tx_ring_cnt; ++i) 3865 sc->tx_data[i].tx_intr_nsegs = segs; 3866 } 3867 3868 ifnet_deserialize_all(ifp); 3869 3870 return error; 3871 } 3872 3873 static int 3874 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3875 { 3876 struct emx_softc *sc = (void *)arg1; 3877 struct ifnet *ifp = &sc->arpcom.ac_if; 3878 int error, nsegs, i; 3879 3880 nsegs = sc->tx_data[0].tx_wreg_nsegs; 3881 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3882 if (error || req->newptr == NULL) 3883 return error; 3884 3885 ifnet_serialize_all(ifp); 3886 for (i = 0; i < sc->tx_ring_cnt; ++i) 3887 sc->tx_data[i].tx_wreg_nsegs = nsegs; 3888 ifnet_deserialize_all(ifp); 3889 3890 return 0; 3891 } 3892 3893 #ifdef IFPOLL_ENABLE 3894 3895 static int 3896 emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 3897 { 3898 struct emx_softc *sc = (void *)arg1; 3899 struct ifnet *ifp = &sc->arpcom.ac_if; 3900 int error, off; 3901 3902 off = sc->rx_npoll_off; 3903 error = sysctl_handle_int(oidp, &off, 0, req); 3904 if (error || req->newptr == NULL) 3905 return error; 3906 if (off < 0) 3907 return EINVAL; 3908 3909 ifnet_serialize_all(ifp); 3910 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) { 3911 error = EINVAL; 3912 } else { 3913 error = 0; 3914 sc->rx_npoll_off = off; 3915 } 3916 ifnet_deserialize_all(ifp); 3917 3918 return error; 3919 } 3920 3921 static
static int
emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->tx_npoll_off;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->tx_npoll_off = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif /* IFPOLL_ENABLE */

static int
emx_dma_alloc(struct emx_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_dtag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate transmit descriptor rings and buffers
	 */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		error = emx_create_tx_ring(&sc->tx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup transmit structures\n");
			return error;
		}
	}

	/*
	 * Allocate receive descriptor rings and buffers
	 */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		error = emx_create_rx_ring(&sc->rx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup receive structures\n");
			return error;
		}
	}
	return 0;
}

static void
emx_dma_free(struct emx_softc *sc)
{
	int i;

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		emx_destroy_tx_ring(&sc->tx_data[i],
		    sc->tx_data[i].num_tx_desc);
	}

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		emx_destroy_rx_ring(&sc->rx_data[i],
		    sc->rx_data[i].num_rx_desc);
	}

	/* Free top level busdma tag */
	if (sc->parent_dtag != NULL)
		bus_dma_tag_destroy(sc->parent_dtag);
}

static void
emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
}

static int
emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_serialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
}

static void
emx_deserialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
}

#ifdef INVARIANTS

static void
emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
	    slz, serialized);
}

#endif /* INVARIANTS */
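/*
 * Illustrative sketch, not part of the driver build: the skipmain
 * helpers above enter/exit the serializer array starting at index 1,
 * i.e. every serializer except serializes[0], the main serializer.
 * A caller that already holds the main serializer would typically
 * wrap a section touching all rings as below; the surrounding code
 * is an assumption for illustration.
 */
#if 0	/* example only */
	/* sc->main_serialize is already held here */
	emx_serialize_skipmain(sc);	/* take serializes[1..] */
	/* ... reprogram TX/RX rings while everything is quiesced ... */
	emx_deserialize_skipmain(sc);
#endif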
#ifdef IFPOLL_ENABLE

static void
emx_npoll_status(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);
		callout_reset(&sc->timer, hz, emx_timer, sc);
	}
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct emx_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->tx_serialize);

	emx_txeof(tdata);
	if (!ifsq_is_empty(tdata->ifsq))
		ifsq_devstart(tdata->ifsq);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct emx_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->rx_serialize);

	emx_rxeof(rdata, cycle);
}

static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct emx_softc *sc = ifp->if_softc;
	int i, txr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int off;

		info->ifpi_status.status_func = emx_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = emx_get_txring_inuse(sc, TRUE);
		off = sc->tx_npoll_off;
		for (i = 0; i < txr_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];
			int idx = i + off;

			KKASSERT(idx < ncpus2);
			info->ifpi_tx[idx].poll_func = emx_npoll_tx;
			info->ifpi_tx[idx].arg = tdata;
			info->ifpi_tx[idx].serializer = &tdata->tx_serialize;
			ifsq_set_cpuid(tdata->ifsq, idx);
		}

		off = sc->rx_npoll_off;
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct emx_rxdata *rdata = &sc->rx_data[i];
			int idx = i + off;

			KKASSERT(idx < ncpus2);
			info->ifpi_rx[idx].poll_func = emx_npoll_rx;
			info->ifpi_rx[idx].arg = rdata;
			info->ifpi_rx[idx].serializer = &rdata->rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING) {
			if (txr_cnt == sc->tx_ring_inuse)
				emx_disable_intr(sc);
			else
				emx_init(sc);
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			ifsq_set_cpuid(tdata->ifsq,
			    rman_get_cpuid(sc->intr_res));
		}

		if (ifp->if_flags & IFF_RUNNING) {
			txr_cnt = emx_get_txring_inuse(sc, FALSE);
			if (txr_cnt == sc->tx_ring_inuse)
				emx_enable_intr(sc);
			else
				emx_init(sc);
		}
	}
}

#endif /* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
	E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
	if (sc->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When using MSIX interrupts we need to
		 * throttle using the EITR register
		 */
		for (i = 0; i < 4; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
	}
}
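/*
 * Illustrative sketch, not part of the driver build: emx_set_itr()
 * takes the value in the hardware's 256ns ITR units, which is what
 * emx_sysctl_int_throttle() computes from the requested interrupt
 * rate.  The helper below shows that conversion by itself; its name
 * is an assumption for illustration.
 */
#if 0	/* example only */
static uint32_t
emx_rate_to_itr(uint32_t ints_per_sec)
{
	/*
	 * 1000000000 / 256 = 3906250 ITR units per second,
	 * so e.g. 6000 ints/s -> 3906250 / 6000 = 651 units.
	 */
	return 1000000000 / 256 / ints_per_sec;
}
#endif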
/*
 * Disable the L0s, 82574L Errata #20
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = sc->dev;

	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8 disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
		/*
		 * 82574 specification update errata #20
		 *
		 * There is no need to disable L1
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}

static int
emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
{
	int iphlen, hoff, thoff, ex = 0;
	struct mbuf *m;
	struct ip *ip;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
		ex = 4;

	if (m->m_len < hoff + iphlen + thoff + ex) {
		m = m_pullup(m, hoff + iphlen + thoff + ex);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	ip->ip_len = 0;

	return 0;
}
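/*
 * Worked example for emx_tso_pullup() above, with assumed values for
 * illustration: an untagged Ethernet + IPv4 + TCP frame without
 * options has hoff = 14, iphlen = 20 and thoff = 20, so all headers
 * must sit in the first 54 contiguous bytes (58 when
 * EMX_TXFLAG_TSO_PULLEX adds the 4 extra bytes); otherwise m_pullup()
 * is used to make them contiguous.
 */
#if 0	/* example only */
	int need = 14 + 20 + 20;	/* hoff + iphlen + thoff = 54 */
	if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
		need += 4;		/* 58 bytes */
#endif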
static int
emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int hoff, iphlen, thoff, hlen;
	int mss, pktlen, curr_txd;

#ifdef EMX_TSO_DEBUG
	tdata->tso_segments++;
#endif

	iphlen = mp->m_pkthdr.csum_iphlen;
	thoff = mp->m_pkthdr.csum_thlen;
	hoff = mp->m_pkthdr.csum_lhlen;
	mss = mp->m_pkthdr.tso_segsz;
	pktlen = mp->m_pkthdr.len;

	if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
	    tdata->csum_flags == CSUM_TSO &&
	    tdata->csum_iphlen == iphlen &&
	    tdata->csum_lhlen == hoff &&
	    tdata->csum_thlen == thoff &&
	    tdata->csum_mss == mss &&
	    tdata->csum_pktlen == pktlen) {
		*txd_upper = tdata->csum_txd_upper;
		*txd_lower = tdata->csum_txd_lower;
#ifdef EMX_TSO_DEBUG
		tdata->tso_ctx_reused++;
#endif
		return 0;
	}
	hlen = hoff + iphlen + thoff;

	/*
	 * Setup a new TSO context.
	 */

	curr_txd = tdata->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
	    E1000_TXD_DTYP_D |			/* Data descr type */
	    E1000_TXD_CMD_TSE;			/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

	/*
	 * Start offset for header checksum calculation.
	 * End offset for header checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->lower_setup.ip_fields.ipcss = hoff;
	TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
	TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);

	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    hoff + iphlen + offsetof(struct tcphdr, th_sum);

	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mss);
	TXD->tcp_seg_setup.fields.hdr_len = hlen;
	TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
	    E1000_TXD_CMD_DEXT |	/* Extended descr */
	    E1000_TXD_CMD_TSE |		/* TSE context */
	    E1000_TXD_CMD_IP |		/* Do IP csum */
	    E1000_TXD_CMD_TCP |		/* Do TCP checksum */
	    (pktlen - hlen));		/* Total len */

	/* Save the information for this TSO context */
	tdata->csum_flags = CSUM_TSO;
	tdata->csum_lhlen = hoff;
	tdata->csum_iphlen = iphlen;
	tdata->csum_thlen = thoff;
	tdata->csum_mss = mss;
	tdata->csum_pktlen = pktlen;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}

static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
	if (polling)
		return sc->tx_ring_cnt;
	else
		return 1;
}
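/*
 * Illustrative note, not part of the driver build: multiple TX rings
 * are only used while polling; with regular interrupts everything
 * funnels through ring 0, which is why emx_get_txring_inuse()
 * collapses to 1 when polling is off.  emx_npoll() above compares its
 * result against sc->tx_ring_inuse to decide whether a full
 * emx_init() is needed when switching modes.
 */
#if 0	/* example only */
	txr_cnt = emx_get_txring_inuse(sc, TRUE);	/* -> sc->tx_ring_cnt */
	txr_cnt = emx_get_txring_inuse(sc, FALSE);	/* -> 1 */
#endif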