1 /* 2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved. 3 * 4 * Copyright (c) 2001-2008, Intel Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * 34 * Copyright (c) 2005 The DragonFly Project. All rights reserved. 35 * 36 * This code is derived from software contributed to The DragonFly Project 37 * by Matthew Dillon <dillon@backplane.com> 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in 47 * the documentation and/or other materials provided with the 48 * distribution. 49 * 3. Neither the name of The DragonFly Project nor the names of its 50 * contributors may be used to endorse or promote products derived 51 * from this software without specific, prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 */ 66 67 #include "opt_ifpoll.h" 68 #include "opt_emx.h" 69 70 #include <sys/param.h> 71 #include <sys/bus.h> 72 #include <sys/endian.h> 73 #include <sys/interrupt.h> 74 #include <sys/kernel.h> 75 #include <sys/ktr.h> 76 #include <sys/malloc.h> 77 #include <sys/mbuf.h> 78 #include <sys/proc.h> 79 #include <sys/rman.h> 80 #include <sys/serialize.h> 81 #include <sys/serialize2.h> 82 #include <sys/socket.h> 83 #include <sys/sockio.h> 84 #include <sys/sysctl.h> 85 #include <sys/systm.h> 86 87 #include <net/bpf.h> 88 #include <net/ethernet.h> 89 #include <net/if.h> 90 #include <net/if_arp.h> 91 #include <net/if_dl.h> 92 #include <net/if_media.h> 93 #include <net/ifq_var.h> 94 #include <net/toeplitz.h> 95 #include <net/toeplitz2.h> 96 #include <net/vlan/if_vlan_var.h> 97 #include <net/vlan/if_vlan_ether.h> 98 #include <net/if_poll.h> 99 100 #include <netinet/in_systm.h> 101 #include <netinet/in.h> 102 #include <netinet/ip.h> 103 #include <netinet/tcp.h> 104 #include <netinet/udp.h> 105 106 #include <bus/pci/pcivar.h> 107 #include <bus/pci/pcireg.h> 108 109 #include <dev/netif/ig_hal/e1000_api.h> 110 #include <dev/netif/ig_hal/e1000_82571.h> 111 #include <dev/netif/ig_hal/e1000_dragonfly.h> 112 #include <dev/netif/emx/if_emx.h> 113 114 #define DEBUG_HW 0 115 116 #ifdef EMX_RSS_DEBUG 117 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \ 118 do { \ 119 if (sc->rss_debug >= lvl) \ 120 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 121 } while (0) 122 #else /* !EMX_RSS_DEBUG */ 123 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 124 #endif /* EMX_RSS_DEBUG */ 125 126 #define EMX_NAME "Intel(R) PRO/1000 " 127 128 #define EMX_DEVICE(id) \ 129 { EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id } 130 #define EMX_DEVICE_NULL { 0, 0, NULL } 131 132 static const struct emx_device { 133 uint16_t vid; 134 uint16_t did; 135 const char *desc; 136 } emx_devices[] = { 137 EMX_DEVICE(82571EB_COPPER), 138 EMX_DEVICE(82571EB_FIBER), 139 EMX_DEVICE(82571EB_SERDES), 140 EMX_DEVICE(82571EB_SERDES_DUAL), 141 EMX_DEVICE(82571EB_SERDES_QUAD), 142 EMX_DEVICE(82571EB_QUAD_COPPER), 143 EMX_DEVICE(82571EB_QUAD_COPPER_BP), 144 EMX_DEVICE(82571EB_QUAD_COPPER_LP), 145 EMX_DEVICE(82571EB_QUAD_FIBER), 146 EMX_DEVICE(82571PT_QUAD_COPPER), 147 148 EMX_DEVICE(82572EI_COPPER), 149 EMX_DEVICE(82572EI_FIBER), 150 EMX_DEVICE(82572EI_SERDES), 151 EMX_DEVICE(82572EI), 152 153 EMX_DEVICE(82573E), 154 EMX_DEVICE(82573E_IAMT), 155 EMX_DEVICE(82573L), 156 157 EMX_DEVICE(80003ES2LAN_COPPER_SPT), 158 EMX_DEVICE(80003ES2LAN_SERDES_SPT), 159 EMX_DEVICE(80003ES2LAN_COPPER_DPT), 160 EMX_DEVICE(80003ES2LAN_SERDES_DPT), 161 162 EMX_DEVICE(82574L), 163 EMX_DEVICE(82574LA), 164 165 EMX_DEVICE(PCH_LPT_I217_LM), 166 EMX_DEVICE(PCH_LPT_I217_V), 167 EMX_DEVICE(PCH_LPTLP_I218_LM), 168 EMX_DEVICE(PCH_LPTLP_I218_V), 169 EMX_DEVICE(PCH_I218_LM2), 170 EMX_DEVICE(PCH_I218_V2), 171 EMX_DEVICE(PCH_I218_LM3), 172 EMX_DEVICE(PCH_I218_V3), 173 EMX_DEVICE(PCH_SPT_I219_LM), 174 EMX_DEVICE(PCH_SPT_I219_V), 175 EMX_DEVICE(PCH_SPT_I219_LM2), 176 EMX_DEVICE(PCH_SPT_I219_V2), 177 178 /* required last entry */ 179 EMX_DEVICE_NULL 180 }; 181 182 static int emx_probe(device_t); 183 static int emx_attach(device_t); 184 static int emx_detach(device_t); 185 static int emx_shutdown(device_t); 186 static int emx_suspend(device_t); 187 static int emx_resume(device_t); 188 189 static void emx_init(void *); 190 static void emx_stop(struct emx_softc *); 191 static int emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 192 static void emx_start(struct ifnet *, struct ifaltq_subque *); 193 #ifdef IFPOLL_ENABLE 194 static void emx_npoll(struct ifnet *, struct ifpoll_info *); 195 static void emx_npoll_status(struct ifnet *); 196 static void emx_npoll_tx(struct ifnet *, void *, int); 197 static void emx_npoll_rx(struct ifnet *, void *, int); 198 #endif 199 static void emx_watchdog(struct ifaltq_subque *); 200 static void emx_media_status(struct ifnet *, struct ifmediareq *); 201 static int emx_media_change(struct ifnet *); 202 static void emx_timer(void *); 203 static void emx_serialize(struct ifnet *, enum ifnet_serialize); 204 static void emx_deserialize(struct ifnet *, enum ifnet_serialize); 205 static int emx_tryserialize(struct ifnet *, enum ifnet_serialize); 206 #ifdef INVARIANTS 207 static void emx_serialize_assert(struct ifnet *, enum ifnet_serialize, 208 boolean_t); 209 #endif 210 211 static void emx_intr(void *); 212 static void emx_intr_mask(void *); 213 static void emx_intr_body(struct emx_softc *, boolean_t); 214 static void emx_rxeof(struct emx_rxdata *, int); 215 static void emx_txeof(struct emx_txdata *); 216 static void emx_tx_collect(struct emx_txdata *); 217 static void emx_tx_purge(struct emx_softc *); 218 static void emx_enable_intr(struct emx_softc *); 219 static void emx_disable_intr(struct emx_softc *); 220 221 static int emx_dma_alloc(struct emx_softc *); 222 static void emx_dma_free(struct emx_softc *); 223 static void emx_init_tx_ring(struct emx_txdata *); 224 static int emx_init_rx_ring(struct emx_rxdata *); 225 static void emx_free_tx_ring(struct emx_txdata *); 226 static void emx_free_rx_ring(struct emx_rxdata *); 227 static int emx_create_tx_ring(struct emx_txdata *); 228 static int emx_create_rx_ring(struct emx_rxdata *); 229 static void emx_destroy_tx_ring(struct emx_txdata *, int); 230 static void emx_destroy_rx_ring(struct emx_rxdata *, int); 231 static int emx_newbuf(struct emx_rxdata *, int, int); 232 static int emx_encap(struct emx_txdata *, struct mbuf **, int *, int *); 233 static int emx_txcsum(struct emx_txdata *, struct mbuf *, 234 uint32_t *, uint32_t *); 235 static int emx_tso_pullup(struct emx_txdata *, struct mbuf **); 236 static int emx_tso_setup(struct emx_txdata *, struct mbuf *, 237 uint32_t *, uint32_t *); 238 static int emx_get_txring_inuse(const struct emx_softc *, boolean_t); 239 240 static int emx_is_valid_eaddr(const uint8_t *); 241 static int emx_reset(struct emx_softc *); 242 static void emx_setup_ifp(struct emx_softc *); 243 static void emx_init_tx_unit(struct emx_softc *); 244 static void emx_init_rx_unit(struct emx_softc *); 245 static void emx_update_stats(struct emx_softc *); 246 static void emx_set_promisc(struct emx_softc *); 247 static void emx_disable_promisc(struct emx_softc *); 248 static void emx_set_multi(struct emx_softc *); 249 static void emx_update_link_status(struct emx_softc *); 250 static void emx_smartspeed(struct emx_softc *); 251 static void emx_set_itr(struct emx_softc *, uint32_t); 252 static void emx_disable_aspm(struct emx_softc *); 253 254 static void emx_print_debug_info(struct emx_softc *); 255 static void emx_print_nvm_info(struct emx_softc *); 256 static void emx_print_hw_stats(struct emx_softc *); 257 258 static int emx_sysctl_stats(SYSCTL_HANDLER_ARGS); 259 static int emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 260 static int emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS); 261 static int emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS); 262 static int emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS); 263 #ifdef IFPOLL_ENABLE 264 static int emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS); 265 static int emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS); 266 #endif 267 static void emx_add_sysctl(struct emx_softc *); 268 269 static void emx_serialize_skipmain(struct emx_softc *); 270 static void emx_deserialize_skipmain(struct emx_softc *); 271 272 /* Management and WOL Support */ 273 static void emx_get_mgmt(struct emx_softc *); 274 static void emx_rel_mgmt(struct emx_softc *); 275 static void emx_get_hw_control(struct emx_softc *); 276 static void emx_rel_hw_control(struct emx_softc *); 277 static void emx_enable_wol(device_t); 278 279 static device_method_t emx_methods[] = { 280 /* Device interface */ 281 DEVMETHOD(device_probe, emx_probe), 282 DEVMETHOD(device_attach, emx_attach), 283 DEVMETHOD(device_detach, emx_detach), 284 DEVMETHOD(device_shutdown, emx_shutdown), 285 DEVMETHOD(device_suspend, emx_suspend), 286 DEVMETHOD(device_resume, emx_resume), 287 DEVMETHOD_END 288 }; 289 290 static driver_t emx_driver = { 291 "emx", 292 emx_methods, 293 sizeof(struct emx_softc), 294 }; 295 296 static devclass_t emx_devclass; 297 298 DECLARE_DUMMY_MODULE(if_emx); 299 MODULE_DEPEND(emx, ig_hal, 1, 1, 1); 300 DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL); 301 302 /* 303 * Tunables 304 */ 305 static int emx_int_throttle_ceil = EMX_DEFAULT_ITR; 306 static int emx_rxd = EMX_DEFAULT_RXD; 307 static int emx_txd = EMX_DEFAULT_TXD; 308 static int emx_smart_pwr_down = 0; 309 static int emx_rxr = 0; 310 static int emx_txr = 1; 311 312 /* Controls whether promiscuous also shows bad packets */ 313 static int emx_debug_sbp = 0; 314 315 static int emx_82573_workaround = 1; 316 static int emx_msi_enable = 1; 317 318 static char emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_RXPAUSE; 319 320 TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil); 321 TUNABLE_INT("hw.emx.rxd", &emx_rxd); 322 TUNABLE_INT("hw.emx.rxr", &emx_rxr); 323 TUNABLE_INT("hw.emx.txd", &emx_txd); 324 TUNABLE_INT("hw.emx.txr", &emx_txr); 325 TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down); 326 TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp); 327 TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround); 328 TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable); 329 TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl)); 330 331 /* Global used in WOL setup with multiport cards */ 332 static int emx_global_quad_port_a = 0; 333 334 /* Set this to one to display debug statistics */ 335 static int emx_display_debug_stats = 0; 336 337 #if !defined(KTR_IF_EMX) 338 #define KTR_IF_EMX KTR_ALL 339 #endif 340 KTR_INFO_MASTER(if_emx); 341 KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin"); 342 KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end"); 343 KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet"); 344 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet"); 345 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean"); 346 #define logif(name) KTR_LOG(if_emx_ ## name) 347 348 static __inline void 349 emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf) 350 { 351 rxd->rxd_bufaddr = htole64(rxbuf->paddr); 352 /* DD bit must be cleared */ 353 rxd->rxd_staterr = 0; 354 } 355 356 static __inline void 357 emx_rxcsum(uint32_t staterr, struct mbuf *mp) 358 { 359 /* Ignore Checksum bit is set */ 360 if (staterr & E1000_RXD_STAT_IXSM) 361 return; 362 363 if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == 364 E1000_RXD_STAT_IPCS) 365 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; 366 367 if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 368 E1000_RXD_STAT_TCPCS) { 369 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 370 CSUM_PSEUDO_HDR | 371 CSUM_FRAG_NOT_CHECKED; 372 mp->m_pkthdr.csum_data = htons(0xffff); 373 } 374 } 375 376 static __inline struct pktinfo * 377 emx_rssinfo(struct mbuf *m, struct pktinfo *pi, 378 uint32_t mrq, uint32_t hash, uint32_t staterr) 379 { 380 switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) { 381 case EMX_RXDMRQ_IPV4_TCP: 382 pi->pi_netisr = NETISR_IP; 383 pi->pi_flags = 0; 384 pi->pi_l3proto = IPPROTO_TCP; 385 break; 386 387 case EMX_RXDMRQ_IPV6_TCP: 388 pi->pi_netisr = NETISR_IPV6; 389 pi->pi_flags = 0; 390 pi->pi_l3proto = IPPROTO_TCP; 391 break; 392 393 case EMX_RXDMRQ_IPV4: 394 if (staterr & E1000_RXD_STAT_IXSM) 395 return NULL; 396 397 if ((staterr & 398 (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 399 E1000_RXD_STAT_TCPCS) { 400 pi->pi_netisr = NETISR_IP; 401 pi->pi_flags = 0; 402 pi->pi_l3proto = IPPROTO_UDP; 403 break; 404 } 405 /* FALL THROUGH */ 406 default: 407 return NULL; 408 } 409 410 m_sethash(m, toeplitz_hash(hash)); 411 return pi; 412 } 413 414 static int 415 emx_probe(device_t dev) 416 { 417 const struct emx_device *d; 418 uint16_t vid, did; 419 420 vid = pci_get_vendor(dev); 421 did = pci_get_device(dev); 422 423 for (d = emx_devices; d->desc != NULL; ++d) { 424 if (vid == d->vid && did == d->did) { 425 device_set_desc(dev, d->desc); 426 device_set_async_attach(dev, TRUE); 427 return 0; 428 } 429 } 430 return ENXIO; 431 } 432 433 static int 434 emx_attach(device_t dev) 435 { 436 struct emx_softc *sc = device_get_softc(dev); 437 int error = 0, i, throttle, msi_enable, tx_ring_max; 438 u_int intr_flags; 439 uint16_t eeprom_data, device_id, apme_mask; 440 driver_intr_t *intr_func; 441 char flowctrl[IFM_ETH_FC_STRLEN]; 442 #ifdef IFPOLL_ENABLE 443 int offset, offset_def; 444 #endif 445 446 /* 447 * Setup RX rings 448 */ 449 for (i = 0; i < EMX_NRX_RING; ++i) { 450 sc->rx_data[i].sc = sc; 451 sc->rx_data[i].idx = i; 452 } 453 454 /* 455 * Setup TX ring 456 */ 457 for (i = 0; i < EMX_NTX_RING; ++i) { 458 sc->tx_data[i].sc = sc; 459 sc->tx_data[i].idx = i; 460 } 461 462 /* 463 * Initialize serializers 464 */ 465 lwkt_serialize_init(&sc->main_serialize); 466 for (i = 0; i < EMX_NTX_RING; ++i) 467 lwkt_serialize_init(&sc->tx_data[i].tx_serialize); 468 for (i = 0; i < EMX_NRX_RING; ++i) 469 lwkt_serialize_init(&sc->rx_data[i].rx_serialize); 470 471 /* 472 * Initialize serializer array 473 */ 474 i = 0; 475 476 KKASSERT(i < EMX_NSERIALIZE); 477 sc->serializes[i++] = &sc->main_serialize; 478 479 KKASSERT(i < EMX_NSERIALIZE); 480 sc->serializes[i++] = &sc->tx_data[0].tx_serialize; 481 KKASSERT(i < EMX_NSERIALIZE); 482 sc->serializes[i++] = &sc->tx_data[1].tx_serialize; 483 484 KKASSERT(i < EMX_NSERIALIZE); 485 sc->serializes[i++] = &sc->rx_data[0].rx_serialize; 486 KKASSERT(i < EMX_NSERIALIZE); 487 sc->serializes[i++] = &sc->rx_data[1].rx_serialize; 488 489 KKASSERT(i == EMX_NSERIALIZE); 490 491 ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK, 492 emx_media_change, emx_media_status); 493 callout_init_mp(&sc->timer); 494 495 sc->dev = sc->osdep.dev = dev; 496 497 /* 498 * Determine hardware and mac type 499 */ 500 sc->hw.vendor_id = pci_get_vendor(dev); 501 sc->hw.device_id = pci_get_device(dev); 502 sc->hw.revision_id = pci_get_revid(dev); 503 sc->hw.subsystem_vendor_id = pci_get_subvendor(dev); 504 sc->hw.subsystem_device_id = pci_get_subdevice(dev); 505 506 if (e1000_set_mac_type(&sc->hw)) 507 return ENXIO; 508 509 /* Enable bus mastering */ 510 pci_enable_busmaster(dev); 511 512 /* 513 * Allocate IO memory 514 */ 515 sc->memory_rid = EMX_BAR_MEM; 516 sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 517 &sc->memory_rid, RF_ACTIVE); 518 if (sc->memory == NULL) { 519 device_printf(dev, "Unable to allocate bus resource: memory\n"); 520 error = ENXIO; 521 goto fail; 522 } 523 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory); 524 sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory); 525 526 /* XXX This is quite goofy, it is not actually used */ 527 sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle; 528 529 /* 530 * Don't enable MSI-X on 82574, see: 531 * 82574 specification update errata #15 532 * 533 * Don't enable MSI on 82571/82572, see: 534 * 82571/82572 specification update errata #63 535 */ 536 msi_enable = emx_msi_enable; 537 if (msi_enable && 538 (sc->hw.mac.type == e1000_82571 || 539 sc->hw.mac.type == e1000_82572)) 540 msi_enable = 0; 541 542 /* 543 * Allocate interrupt 544 */ 545 sc->intr_type = pci_alloc_1intr(dev, msi_enable, 546 &sc->intr_rid, &intr_flags); 547 548 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 549 int unshared; 550 551 unshared = device_getenv_int(dev, "irq.unshared", 0); 552 if (!unshared) { 553 sc->flags |= EMX_FLAG_SHARED_INTR; 554 if (bootverbose) 555 device_printf(dev, "IRQ shared\n"); 556 } else { 557 intr_flags &= ~RF_SHAREABLE; 558 if (bootverbose) 559 device_printf(dev, "IRQ unshared\n"); 560 } 561 } 562 563 sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid, 564 intr_flags); 565 if (sc->intr_res == NULL) { 566 device_printf(dev, "Unable to allocate bus resource: " 567 "interrupt\n"); 568 error = ENXIO; 569 goto fail; 570 } 571 572 /* Save PCI command register for Shared Code */ 573 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 574 sc->hw.back = &sc->osdep; 575 576 /* 577 * For I217/I218, we need to map the flash memory and this 578 * must happen after the MAC is identified. 579 */ 580 if (sc->hw.mac.type == e1000_pch_lpt) { 581 sc->flash_rid = EMX_BAR_FLASH; 582 583 sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 584 &sc->flash_rid, RF_ACTIVE); 585 if (sc->flash == NULL) { 586 device_printf(dev, "Mapping of Flash failed\n"); 587 error = ENXIO; 588 goto fail; 589 } 590 sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash); 591 sc->osdep.flash_bus_space_handle = 592 rman_get_bushandle(sc->flash); 593 594 /* 595 * This is used in the shared code 596 * XXX this goof is actually not used. 597 */ 598 sc->hw.flash_address = (uint8_t *)sc->flash; 599 } 600 601 /* Do Shared Code initialization */ 602 if (e1000_setup_init_funcs(&sc->hw, TRUE)) { 603 device_printf(dev, "Setup of Shared code failed\n"); 604 error = ENXIO; 605 goto fail; 606 } 607 e1000_get_bus_info(&sc->hw); 608 609 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 610 sc->hw.phy.autoneg_wait_to_complete = FALSE; 611 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT; 612 613 /* 614 * Interrupt throttle rate 615 */ 616 throttle = device_getenv_int(dev, "int_throttle_ceil", 617 emx_int_throttle_ceil); 618 if (throttle == 0) { 619 sc->int_throttle_ceil = 0; 620 } else { 621 if (throttle < 0) 622 throttle = EMX_DEFAULT_ITR; 623 624 /* Recalculate the tunable value to get the exact frequency. */ 625 throttle = 1000000000 / 256 / throttle; 626 627 /* Upper 16bits of ITR is reserved and should be zero */ 628 if (throttle & 0xffff0000) 629 throttle = 1000000000 / 256 / EMX_DEFAULT_ITR; 630 631 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 632 } 633 634 e1000_init_script_state_82541(&sc->hw, TRUE); 635 e1000_set_tbi_compatibility_82543(&sc->hw, TRUE); 636 637 /* Copper options */ 638 if (sc->hw.phy.media_type == e1000_media_type_copper) { 639 sc->hw.phy.mdix = EMX_AUTO_ALL_MODES; 640 sc->hw.phy.disable_polarity_correction = FALSE; 641 sc->hw.phy.ms_type = EMX_MASTER_SLAVE; 642 } 643 644 /* Set the frame limits assuming standard ethernet sized frames. */ 645 sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 646 647 /* This controls when hardware reports transmit completion status. */ 648 sc->hw.mac.report_tx_early = 1; 649 650 /* Calculate # of RX rings */ 651 sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr); 652 sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING); 653 654 /* 655 * Calculate # of TX rings 656 * 657 * XXX 658 * I217/I218 claims to have 2 TX queues 659 * 660 * NOTE: 661 * Don't enable multiple TX queues on 82574; it always gives 662 * watchdog timeout on TX queue0, when multiple TCP streams are 663 * received. It was originally suspected that the hardware TX 664 * checksum offloading caused this watchdog timeout, since only 665 * TCP ACKs are sent during TCP receiving tests. However, even 666 * if the hardware TX checksum offloading is disable, TX queue0 667 * still will give watchdog. 668 */ 669 tx_ring_max = 1; 670 if (sc->hw.mac.type == e1000_82571 || 671 sc->hw.mac.type == e1000_82572 || 672 sc->hw.mac.type == e1000_80003es2lan || 673 sc->hw.mac.type == e1000_pch_lpt || 674 sc->hw.mac.type == e1000_pch_spt || 675 sc->hw.mac.type == e1000_82574) 676 tx_ring_max = EMX_NTX_RING; 677 sc->tx_ring_cnt = device_getenv_int(dev, "txr", emx_txr); 678 sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, tx_ring_max); 679 680 /* Allocate RX/TX rings' busdma(9) stuffs */ 681 error = emx_dma_alloc(sc); 682 if (error) 683 goto fail; 684 685 /* Allocate multicast array memory. */ 686 sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX, 687 M_DEVBUF, M_WAITOK); 688 689 /* Indicate SOL/IDER usage */ 690 if (e1000_check_reset_block(&sc->hw)) { 691 device_printf(dev, 692 "PHY reset is blocked due to SOL/IDER session.\n"); 693 } 694 695 /* Disable EEE on I217/I218 */ 696 sc->hw.dev_spec.ich8lan.eee_disable = 1; 697 698 /* 699 * Start from a known state, this is important in reading the 700 * nvm and mac from that. 701 */ 702 e1000_reset_hw(&sc->hw); 703 704 /* Make sure we have a good EEPROM before we read from it */ 705 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 706 /* 707 * Some PCI-E parts fail the first check due to 708 * the link being in sleep state, call it again, 709 * if it fails a second time its a real issue. 710 */ 711 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 712 device_printf(dev, 713 "The EEPROM Checksum Is Not Valid\n"); 714 error = EIO; 715 goto fail; 716 } 717 } 718 719 /* Copy the permanent MAC address out of the EEPROM */ 720 if (e1000_read_mac_addr(&sc->hw) < 0) { 721 device_printf(dev, "EEPROM read error while reading MAC" 722 " address\n"); 723 error = EIO; 724 goto fail; 725 } 726 if (!emx_is_valid_eaddr(sc->hw.mac.addr)) { 727 device_printf(dev, "Invalid MAC address\n"); 728 error = EIO; 729 goto fail; 730 } 731 732 /* Disable ULP support */ 733 e1000_disable_ulp_lpt_lp(&sc->hw, TRUE); 734 735 /* Determine if we have to control management hardware */ 736 if (e1000_enable_mng_pass_thru(&sc->hw)) 737 sc->flags |= EMX_FLAG_HAS_MGMT; 738 739 /* 740 * Setup Wake-on-Lan 741 */ 742 apme_mask = EMX_EEPROM_APME; 743 eeprom_data = 0; 744 switch (sc->hw.mac.type) { 745 case e1000_82573: 746 sc->flags |= EMX_FLAG_HAS_AMT; 747 /* FALL THROUGH */ 748 749 case e1000_82571: 750 case e1000_82572: 751 case e1000_80003es2lan: 752 if (sc->hw.bus.func == 1) { 753 e1000_read_nvm(&sc->hw, 754 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 755 } else { 756 e1000_read_nvm(&sc->hw, 757 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 758 } 759 break; 760 761 default: 762 e1000_read_nvm(&sc->hw, 763 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 764 break; 765 } 766 if (eeprom_data & apme_mask) 767 sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC; 768 769 /* 770 * We have the eeprom settings, now apply the special cases 771 * where the eeprom may be wrong or the board won't support 772 * wake on lan on a particular port 773 */ 774 device_id = pci_get_device(dev); 775 switch (device_id) { 776 case E1000_DEV_ID_82571EB_FIBER: 777 /* 778 * Wake events only supported on port A for dual fiber 779 * regardless of eeprom setting 780 */ 781 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & 782 E1000_STATUS_FUNC_1) 783 sc->wol = 0; 784 break; 785 786 case E1000_DEV_ID_82571EB_QUAD_COPPER: 787 case E1000_DEV_ID_82571EB_QUAD_FIBER: 788 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: 789 /* if quad port sc, disable WoL on all but port A */ 790 if (emx_global_quad_port_a != 0) 791 sc->wol = 0; 792 /* Reset for multiple quad port adapters */ 793 if (++emx_global_quad_port_a == 4) 794 emx_global_quad_port_a = 0; 795 break; 796 } 797 798 /* XXX disable wol */ 799 sc->wol = 0; 800 801 #ifdef IFPOLL_ENABLE 802 /* 803 * NPOLLING RX CPU offset 804 */ 805 if (sc->rx_ring_cnt == ncpus2) { 806 offset = 0; 807 } else { 808 offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2; 809 offset = device_getenv_int(dev, "npoll.rxoff", offset_def); 810 if (offset >= ncpus2 || 811 offset % sc->rx_ring_cnt != 0) { 812 device_printf(dev, "invalid npoll.rxoff %d, use %d\n", 813 offset, offset_def); 814 offset = offset_def; 815 } 816 } 817 sc->rx_npoll_off = offset; 818 819 /* 820 * NPOLLING TX CPU offset 821 */ 822 if (sc->tx_ring_cnt == ncpus2) { 823 offset = 0; 824 } else { 825 offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2; 826 offset = device_getenv_int(dev, "npoll.txoff", offset_def); 827 if (offset >= ncpus2 || 828 offset % sc->tx_ring_cnt != 0) { 829 device_printf(dev, "invalid npoll.txoff %d, use %d\n", 830 offset, offset_def); 831 offset = offset_def; 832 } 833 } 834 sc->tx_npoll_off = offset; 835 #endif 836 sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE); 837 838 /* Setup flow control. */ 839 device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl), 840 emx_flowctrl); 841 sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl); 842 843 /* Setup OS specific network interface */ 844 emx_setup_ifp(sc); 845 846 /* Add sysctl tree, must after em_setup_ifp() */ 847 emx_add_sysctl(sc); 848 849 /* Reset the hardware */ 850 error = emx_reset(sc); 851 if (error) { 852 /* 853 * Some 82573 parts fail the first reset, call it again, 854 * if it fails a second time its a real issue. 855 */ 856 error = emx_reset(sc); 857 if (error) { 858 device_printf(dev, "Unable to reset the hardware\n"); 859 ether_ifdetach(&sc->arpcom.ac_if); 860 goto fail; 861 } 862 } 863 864 /* Initialize statistics */ 865 emx_update_stats(sc); 866 867 sc->hw.mac.get_link_status = 1; 868 emx_update_link_status(sc); 869 870 /* Non-AMT based hardware can now take control from firmware */ 871 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) == 872 EMX_FLAG_HAS_MGMT) 873 emx_get_hw_control(sc); 874 875 /* 876 * Missing Interrupt Following ICR read: 877 * 878 * 82571/82572 specification update errata #76 879 * 82573 specification update errata #31 880 * 82574 specification update errata #12 881 */ 882 intr_func = emx_intr; 883 if ((sc->flags & EMX_FLAG_SHARED_INTR) && 884 (sc->hw.mac.type == e1000_82571 || 885 sc->hw.mac.type == e1000_82572 || 886 sc->hw.mac.type == e1000_82573 || 887 sc->hw.mac.type == e1000_82574)) 888 intr_func = emx_intr_mask; 889 890 error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc, 891 &sc->intr_tag, &sc->main_serialize); 892 if (error) { 893 device_printf(dev, "Failed to register interrupt handler"); 894 ether_ifdetach(&sc->arpcom.ac_if); 895 goto fail; 896 } 897 return (0); 898 fail: 899 emx_detach(dev); 900 return (error); 901 } 902 903 static int 904 emx_detach(device_t dev) 905 { 906 struct emx_softc *sc = device_get_softc(dev); 907 908 if (device_is_attached(dev)) { 909 struct ifnet *ifp = &sc->arpcom.ac_if; 910 911 ifnet_serialize_all(ifp); 912 913 emx_stop(sc); 914 915 e1000_phy_hw_reset(&sc->hw); 916 917 emx_rel_mgmt(sc); 918 emx_rel_hw_control(sc); 919 920 if (sc->wol) { 921 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 922 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 923 emx_enable_wol(dev); 924 } 925 926 bus_teardown_intr(dev, sc->intr_res, sc->intr_tag); 927 928 ifnet_deserialize_all(ifp); 929 930 ether_ifdetach(ifp); 931 } else if (sc->memory != NULL) { 932 emx_rel_hw_control(sc); 933 } 934 935 ifmedia_removeall(&sc->media); 936 bus_generic_detach(dev); 937 938 if (sc->intr_res != NULL) { 939 bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, 940 sc->intr_res); 941 } 942 943 if (sc->intr_type == PCI_INTR_TYPE_MSI) 944 pci_release_msi(dev); 945 946 if (sc->memory != NULL) { 947 bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid, 948 sc->memory); 949 } 950 951 if (sc->flash != NULL) { 952 bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid, 953 sc->flash); 954 } 955 956 emx_dma_free(sc); 957 958 if (sc->mta != NULL) 959 kfree(sc->mta, M_DEVBUF); 960 961 return (0); 962 } 963 964 static int 965 emx_shutdown(device_t dev) 966 { 967 return emx_suspend(dev); 968 } 969 970 static int 971 emx_suspend(device_t dev) 972 { 973 struct emx_softc *sc = device_get_softc(dev); 974 struct ifnet *ifp = &sc->arpcom.ac_if; 975 976 ifnet_serialize_all(ifp); 977 978 emx_stop(sc); 979 980 emx_rel_mgmt(sc); 981 emx_rel_hw_control(sc); 982 983 if (sc->wol) { 984 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 985 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 986 emx_enable_wol(dev); 987 } 988 989 ifnet_deserialize_all(ifp); 990 991 return bus_generic_suspend(dev); 992 } 993 994 static int 995 emx_resume(device_t dev) 996 { 997 struct emx_softc *sc = device_get_softc(dev); 998 struct ifnet *ifp = &sc->arpcom.ac_if; 999 int i; 1000 1001 ifnet_serialize_all(ifp); 1002 1003 emx_init(sc); 1004 emx_get_mgmt(sc); 1005 for (i = 0; i < sc->tx_ring_inuse; ++i) 1006 ifsq_devstart_sched(sc->tx_data[i].ifsq); 1007 1008 ifnet_deserialize_all(ifp); 1009 1010 return bus_generic_resume(dev); 1011 } 1012 1013 static void 1014 emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1015 { 1016 struct emx_softc *sc = ifp->if_softc; 1017 struct emx_txdata *tdata = ifsq_get_priv(ifsq); 1018 struct mbuf *m_head; 1019 int idx = -1, nsegs = 0; 1020 1021 KKASSERT(tdata->ifsq == ifsq); 1022 ASSERT_SERIALIZED(&tdata->tx_serialize); 1023 1024 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 1025 return; 1026 1027 if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) { 1028 ifsq_purge(ifsq); 1029 return; 1030 } 1031 1032 while (!ifsq_is_empty(ifsq)) { 1033 /* Now do we at least have a minimal? */ 1034 if (EMX_IS_OACTIVE(tdata)) { 1035 emx_tx_collect(tdata); 1036 if (EMX_IS_OACTIVE(tdata)) { 1037 ifsq_set_oactive(ifsq); 1038 break; 1039 } 1040 } 1041 1042 logif(pkt_txqueue); 1043 m_head = ifsq_dequeue(ifsq); 1044 if (m_head == NULL) 1045 break; 1046 1047 if (emx_encap(tdata, &m_head, &nsegs, &idx)) { 1048 IFNET_STAT_INC(ifp, oerrors, 1); 1049 emx_tx_collect(tdata); 1050 continue; 1051 } 1052 1053 /* 1054 * TX interrupt are aggressively aggregated, so increasing 1055 * opackets at TX interrupt time will make the opackets 1056 * statistics vastly inaccurate; we do the opackets increment 1057 * now. 1058 */ 1059 IFNET_STAT_INC(ifp, opackets, 1); 1060 1061 if (nsegs >= tdata->tx_wreg_nsegs) { 1062 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx); 1063 nsegs = 0; 1064 idx = -1; 1065 } 1066 1067 /* Send a copy of the frame to the BPF listener */ 1068 ETHER_BPF_MTAP(ifp, m_head); 1069 1070 /* Set timeout in case hardware has problems transmitting. */ 1071 tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT; 1072 } 1073 if (idx >= 0) 1074 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx); 1075 } 1076 1077 static int 1078 emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1079 { 1080 struct emx_softc *sc = ifp->if_softc; 1081 struct ifreq *ifr = (struct ifreq *)data; 1082 uint16_t eeprom_data = 0; 1083 int max_frame_size, mask, reinit; 1084 int error = 0; 1085 1086 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1087 1088 switch (command) { 1089 case SIOCSIFMTU: 1090 switch (sc->hw.mac.type) { 1091 case e1000_82573: 1092 /* 1093 * 82573 only supports jumbo frames 1094 * if ASPM is disabled. 1095 */ 1096 e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1, 1097 &eeprom_data); 1098 if (eeprom_data & NVM_WORD1A_ASPM_MASK) { 1099 max_frame_size = ETHER_MAX_LEN; 1100 break; 1101 } 1102 /* FALL THROUGH */ 1103 1104 /* Limit Jumbo Frame size */ 1105 case e1000_82571: 1106 case e1000_82572: 1107 case e1000_82574: 1108 case e1000_pch_lpt: 1109 case e1000_pch_spt: 1110 case e1000_80003es2lan: 1111 max_frame_size = 9234; 1112 break; 1113 1114 default: 1115 max_frame_size = MAX_JUMBO_FRAME_SIZE; 1116 break; 1117 } 1118 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 1119 ETHER_CRC_LEN) { 1120 error = EINVAL; 1121 break; 1122 } 1123 1124 ifp->if_mtu = ifr->ifr_mtu; 1125 sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + 1126 ETHER_CRC_LEN; 1127 1128 if (ifp->if_flags & IFF_RUNNING) 1129 emx_init(sc); 1130 break; 1131 1132 case SIOCSIFFLAGS: 1133 if (ifp->if_flags & IFF_UP) { 1134 if ((ifp->if_flags & IFF_RUNNING)) { 1135 if ((ifp->if_flags ^ sc->if_flags) & 1136 (IFF_PROMISC | IFF_ALLMULTI)) { 1137 emx_disable_promisc(sc); 1138 emx_set_promisc(sc); 1139 } 1140 } else { 1141 emx_init(sc); 1142 } 1143 } else if (ifp->if_flags & IFF_RUNNING) { 1144 emx_stop(sc); 1145 } 1146 sc->if_flags = ifp->if_flags; 1147 break; 1148 1149 case SIOCADDMULTI: 1150 case SIOCDELMULTI: 1151 if (ifp->if_flags & IFF_RUNNING) { 1152 emx_disable_intr(sc); 1153 emx_set_multi(sc); 1154 #ifdef IFPOLL_ENABLE 1155 if (!(ifp->if_flags & IFF_NPOLLING)) 1156 #endif 1157 emx_enable_intr(sc); 1158 } 1159 break; 1160 1161 case SIOCSIFMEDIA: 1162 /* Check SOL/IDER usage */ 1163 if (e1000_check_reset_block(&sc->hw)) { 1164 device_printf(sc->dev, "Media change is" 1165 " blocked due to SOL/IDER session.\n"); 1166 break; 1167 } 1168 /* FALL THROUGH */ 1169 1170 case SIOCGIFMEDIA: 1171 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 1172 break; 1173 1174 case SIOCSIFCAP: 1175 reinit = 0; 1176 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1177 if (mask & IFCAP_RXCSUM) { 1178 ifp->if_capenable ^= IFCAP_RXCSUM; 1179 reinit = 1; 1180 } 1181 if (mask & IFCAP_VLAN_HWTAGGING) { 1182 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1183 reinit = 1; 1184 } 1185 if (mask & IFCAP_TXCSUM) { 1186 ifp->if_capenable ^= IFCAP_TXCSUM; 1187 if (ifp->if_capenable & IFCAP_TXCSUM) 1188 ifp->if_hwassist |= EMX_CSUM_FEATURES; 1189 else 1190 ifp->if_hwassist &= ~EMX_CSUM_FEATURES; 1191 } 1192 if (mask & IFCAP_TSO) { 1193 ifp->if_capenable ^= IFCAP_TSO; 1194 if (ifp->if_capenable & IFCAP_TSO) 1195 ifp->if_hwassist |= CSUM_TSO; 1196 else 1197 ifp->if_hwassist &= ~CSUM_TSO; 1198 } 1199 if (mask & IFCAP_RSS) 1200 ifp->if_capenable ^= IFCAP_RSS; 1201 if (reinit && (ifp->if_flags & IFF_RUNNING)) 1202 emx_init(sc); 1203 break; 1204 1205 default: 1206 error = ether_ioctl(ifp, command, data); 1207 break; 1208 } 1209 return (error); 1210 } 1211 1212 static void 1213 emx_watchdog(struct ifaltq_subque *ifsq) 1214 { 1215 struct emx_txdata *tdata = ifsq_get_priv(ifsq); 1216 struct ifnet *ifp = ifsq_get_ifp(ifsq); 1217 struct emx_softc *sc = ifp->if_softc; 1218 int i; 1219 1220 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1221 1222 /* 1223 * The timer is set to 5 every time start queues a packet. 1224 * Then txeof keeps resetting it as long as it cleans at 1225 * least one descriptor. 1226 * Finally, anytime all descriptors are clean the timer is 1227 * set to 0. 1228 */ 1229 1230 if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) == 1231 E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) { 1232 /* 1233 * If we reach here, all TX jobs are completed and 1234 * the TX engine should have been idled for some time. 1235 * We don't need to call ifsq_devstart_sched() here. 1236 */ 1237 ifsq_clr_oactive(ifsq); 1238 tdata->tx_watchdog.wd_timer = 0; 1239 return; 1240 } 1241 1242 /* 1243 * If we are in this routine because of pause frames, then 1244 * don't reset the hardware. 1245 */ 1246 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) { 1247 tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT; 1248 return; 1249 } 1250 1251 if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx); 1252 1253 IFNET_STAT_INC(ifp, oerrors, 1); 1254 1255 emx_init(sc); 1256 for (i = 0; i < sc->tx_ring_inuse; ++i) 1257 ifsq_devstart_sched(sc->tx_data[i].ifsq); 1258 } 1259 1260 static void 1261 emx_init(void *xsc) 1262 { 1263 struct emx_softc *sc = xsc; 1264 struct ifnet *ifp = &sc->arpcom.ac_if; 1265 device_t dev = sc->dev; 1266 boolean_t polling; 1267 int i; 1268 1269 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1270 1271 emx_stop(sc); 1272 1273 /* Get the latest mac address, User can use a LAA */ 1274 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN); 1275 1276 /* Put the address into the Receive Address Array */ 1277 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1278 1279 /* 1280 * With the 82571 sc, RAR[0] may be overwritten 1281 * when the other port is reset, we make a duplicate 1282 * in RAR[14] for that eventuality, this assures 1283 * the interface continues to function. 1284 */ 1285 if (sc->hw.mac.type == e1000_82571) { 1286 e1000_set_laa_state_82571(&sc->hw, TRUE); 1287 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 1288 E1000_RAR_ENTRIES - 1); 1289 } 1290 1291 /* Initialize the hardware */ 1292 if (emx_reset(sc)) { 1293 device_printf(dev, "Unable to reset the hardware\n"); 1294 /* XXX emx_stop()? */ 1295 return; 1296 } 1297 emx_update_link_status(sc); 1298 1299 /* Setup VLAN support, basic and offload if available */ 1300 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1301 1302 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1303 uint32_t ctrl; 1304 1305 ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL); 1306 ctrl |= E1000_CTRL_VME; 1307 E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl); 1308 } 1309 1310 /* Configure for OS presence */ 1311 emx_get_mgmt(sc); 1312 1313 polling = FALSE; 1314 #ifdef IFPOLL_ENABLE 1315 if (ifp->if_flags & IFF_NPOLLING) 1316 polling = TRUE; 1317 #endif 1318 sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling); 1319 ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1); 1320 1321 /* Prepare transmit descriptors and buffers */ 1322 for (i = 0; i < sc->tx_ring_inuse; ++i) 1323 emx_init_tx_ring(&sc->tx_data[i]); 1324 emx_init_tx_unit(sc); 1325 1326 /* Setup Multicast table */ 1327 emx_set_multi(sc); 1328 1329 /* Prepare receive descriptors and buffers */ 1330 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1331 if (emx_init_rx_ring(&sc->rx_data[i])) { 1332 device_printf(dev, 1333 "Could not setup receive structures\n"); 1334 emx_stop(sc); 1335 return; 1336 } 1337 } 1338 emx_init_rx_unit(sc); 1339 1340 /* Don't lose promiscuous settings */ 1341 emx_set_promisc(sc); 1342 1343 ifp->if_flags |= IFF_RUNNING; 1344 for (i = 0; i < sc->tx_ring_inuse; ++i) { 1345 ifsq_clr_oactive(sc->tx_data[i].ifsq); 1346 ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog); 1347 } 1348 1349 callout_reset(&sc->timer, hz, emx_timer, sc); 1350 e1000_clear_hw_cntrs_base_generic(&sc->hw); 1351 1352 /* MSI/X configuration for 82574 */ 1353 if (sc->hw.mac.type == e1000_82574) { 1354 int tmp; 1355 1356 tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 1357 tmp |= E1000_CTRL_EXT_PBA_CLR; 1358 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp); 1359 /* 1360 * XXX MSIX 1361 * Set the IVAR - interrupt vector routing. 1362 * Each nibble represents a vector, high bit 1363 * is enable, other 3 bits are the MSIX table 1364 * entry, we map RXQ0 to 0, TXQ0 to 1, and 1365 * Link (other) to 2, hence the magic number. 1366 */ 1367 E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908); 1368 } 1369 1370 /* 1371 * Only enable interrupts if we are not polling, make sure 1372 * they are off otherwise. 1373 */ 1374 if (polling) 1375 emx_disable_intr(sc); 1376 else 1377 emx_enable_intr(sc); 1378 1379 /* AMT based hardware can now take control from firmware */ 1380 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) == 1381 (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) 1382 emx_get_hw_control(sc); 1383 } 1384 1385 static void 1386 emx_intr(void *xsc) 1387 { 1388 emx_intr_body(xsc, TRUE); 1389 } 1390 1391 static void 1392 emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted) 1393 { 1394 struct ifnet *ifp = &sc->arpcom.ac_if; 1395 uint32_t reg_icr; 1396 1397 logif(intr_beg); 1398 ASSERT_SERIALIZED(&sc->main_serialize); 1399 1400 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 1401 1402 if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) { 1403 logif(intr_end); 1404 return; 1405 } 1406 1407 /* 1408 * XXX: some laptops trigger several spurious interrupts 1409 * on emx(4) when in the resume cycle. The ICR register 1410 * reports all-ones value in this case. Processing such 1411 * interrupts would lead to a freeze. I don't know why. 1412 */ 1413 if (reg_icr == 0xffffffff) { 1414 logif(intr_end); 1415 return; 1416 } 1417 1418 if (ifp->if_flags & IFF_RUNNING) { 1419 if (reg_icr & 1420 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 1421 int i; 1422 1423 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1424 lwkt_serialize_enter( 1425 &sc->rx_data[i].rx_serialize); 1426 emx_rxeof(&sc->rx_data[i], -1); 1427 lwkt_serialize_exit( 1428 &sc->rx_data[i].rx_serialize); 1429 } 1430 } 1431 if (reg_icr & E1000_ICR_TXDW) { 1432 struct emx_txdata *tdata = &sc->tx_data[0]; 1433 1434 lwkt_serialize_enter(&tdata->tx_serialize); 1435 emx_txeof(tdata); 1436 if (!ifsq_is_empty(tdata->ifsq)) 1437 ifsq_devstart(tdata->ifsq); 1438 lwkt_serialize_exit(&tdata->tx_serialize); 1439 } 1440 } 1441 1442 /* Link status change */ 1443 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1444 emx_serialize_skipmain(sc); 1445 1446 callout_stop(&sc->timer); 1447 sc->hw.mac.get_link_status = 1; 1448 emx_update_link_status(sc); 1449 1450 /* Deal with TX cruft when link lost */ 1451 emx_tx_purge(sc); 1452 1453 callout_reset(&sc->timer, hz, emx_timer, sc); 1454 1455 emx_deserialize_skipmain(sc); 1456 } 1457 1458 if (reg_icr & E1000_ICR_RXO) 1459 sc->rx_overruns++; 1460 1461 logif(intr_end); 1462 } 1463 1464 static void 1465 emx_intr_mask(void *xsc) 1466 { 1467 struct emx_softc *sc = xsc; 1468 1469 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 1470 /* 1471 * NOTE: 1472 * ICR.INT_ASSERTED bit will never be set if IMS is 0, 1473 * so don't check it. 1474 */ 1475 emx_intr_body(sc, FALSE); 1476 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 1477 } 1478 1479 static void 1480 emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1481 { 1482 struct emx_softc *sc = ifp->if_softc; 1483 1484 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1485 1486 emx_update_link_status(sc); 1487 1488 ifmr->ifm_status = IFM_AVALID; 1489 ifmr->ifm_active = IFM_ETHER; 1490 1491 if (!sc->link_active) { 1492 if (sc->hw.mac.autoneg) 1493 ifmr->ifm_active |= IFM_NONE; 1494 else 1495 ifmr->ifm_active |= sc->media.ifm_media; 1496 return; 1497 } 1498 1499 ifmr->ifm_status |= IFM_ACTIVE; 1500 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1501 ifmr->ifm_active |= sc->ifm_flowctrl; 1502 1503 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1504 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 1505 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 1506 } else { 1507 switch (sc->link_speed) { 1508 case 10: 1509 ifmr->ifm_active |= IFM_10_T; 1510 break; 1511 case 100: 1512 ifmr->ifm_active |= IFM_100_TX; 1513 break; 1514 1515 case 1000: 1516 ifmr->ifm_active |= IFM_1000_T; 1517 break; 1518 } 1519 if (sc->link_duplex == FULL_DUPLEX) 1520 ifmr->ifm_active |= IFM_FDX; 1521 else 1522 ifmr->ifm_active |= IFM_HDX; 1523 } 1524 if (ifmr->ifm_active & IFM_FDX) 1525 ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode); 1526 } 1527 1528 static int 1529 emx_media_change(struct ifnet *ifp) 1530 { 1531 struct emx_softc *sc = ifp->if_softc; 1532 struct ifmedia *ifm = &sc->media; 1533 1534 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1535 1536 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1537 return (EINVAL); 1538 1539 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1540 case IFM_AUTO: 1541 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1542 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT; 1543 break; 1544 1545 case IFM_1000_SX: 1546 case IFM_1000_T: 1547 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1548 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1549 break; 1550 1551 case IFM_100_TX: 1552 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1553 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1554 } else { 1555 if (IFM_OPTIONS(ifm->ifm_media) & 1556 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1557 if (bootverbose) { 1558 if_printf(ifp, "Flow control is not " 1559 "allowed for half-duplex\n"); 1560 } 1561 return EINVAL; 1562 } 1563 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1564 } 1565 sc->hw.mac.autoneg = FALSE; 1566 sc->hw.phy.autoneg_advertised = 0; 1567 break; 1568 1569 case IFM_10_T: 1570 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1571 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1572 } else { 1573 if (IFM_OPTIONS(ifm->ifm_media) & 1574 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1575 if (bootverbose) { 1576 if_printf(ifp, "Flow control is not " 1577 "allowed for half-duplex\n"); 1578 } 1579 return EINVAL; 1580 } 1581 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1582 } 1583 sc->hw.mac.autoneg = FALSE; 1584 sc->hw.phy.autoneg_advertised = 0; 1585 break; 1586 1587 default: 1588 if (bootverbose) { 1589 if_printf(ifp, "Unsupported media type %d\n", 1590 IFM_SUBTYPE(ifm->ifm_media)); 1591 } 1592 return EINVAL; 1593 } 1594 sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK; 1595 1596 if (ifp->if_flags & IFF_RUNNING) 1597 emx_init(sc); 1598 1599 return (0); 1600 } 1601 1602 static int 1603 emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp, 1604 int *segs_used, int *idx) 1605 { 1606 bus_dma_segment_t segs[EMX_MAX_SCATTER]; 1607 bus_dmamap_t map; 1608 struct emx_txbuf *tx_buffer, *tx_buffer_mapped; 1609 struct e1000_tx_desc *ctxd = NULL; 1610 struct mbuf *m_head = *m_headp; 1611 uint32_t txd_upper, txd_lower, cmd = 0; 1612 int maxsegs, nsegs, i, j, first, last = 0, error; 1613 1614 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1615 error = emx_tso_pullup(tdata, m_headp); 1616 if (error) 1617 return error; 1618 m_head = *m_headp; 1619 } 1620 1621 txd_upper = txd_lower = 0; 1622 1623 /* 1624 * Capture the first descriptor index, this descriptor 1625 * will have the index of the EOP which is the only one 1626 * that now gets a DONE bit writeback. 1627 */ 1628 first = tdata->next_avail_tx_desc; 1629 tx_buffer = &tdata->tx_buf[first]; 1630 tx_buffer_mapped = tx_buffer; 1631 map = tx_buffer->map; 1632 1633 maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED; 1634 KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc")); 1635 if (maxsegs > EMX_MAX_SCATTER) 1636 maxsegs = EMX_MAX_SCATTER; 1637 1638 error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp, 1639 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1640 if (error) { 1641 m_freem(*m_headp); 1642 *m_headp = NULL; 1643 return error; 1644 } 1645 bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE); 1646 1647 m_head = *m_headp; 1648 tdata->tx_nsegs += nsegs; 1649 *segs_used += nsegs; 1650 1651 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1652 /* TSO will consume one TX desc */ 1653 i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower); 1654 tdata->tx_nsegs += i; 1655 *segs_used += i; 1656 } else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) { 1657 /* TX csum offloading will consume one TX desc */ 1658 i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower); 1659 tdata->tx_nsegs += i; 1660 *segs_used += i; 1661 } 1662 1663 /* Handle VLAN tag */ 1664 if (m_head->m_flags & M_VLANTAG) { 1665 /* Set the vlan id. */ 1666 txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16); 1667 /* Tell hardware to add tag */ 1668 txd_lower |= htole32(E1000_TXD_CMD_VLE); 1669 } 1670 1671 i = tdata->next_avail_tx_desc; 1672 1673 /* Set up our transmit descriptors */ 1674 for (j = 0; j < nsegs; j++) { 1675 tx_buffer = &tdata->tx_buf[i]; 1676 ctxd = &tdata->tx_desc_base[i]; 1677 1678 ctxd->buffer_addr = htole64(segs[j].ds_addr); 1679 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS | 1680 txd_lower | segs[j].ds_len); 1681 ctxd->upper.data = htole32(txd_upper); 1682 1683 last = i; 1684 if (++i == tdata->num_tx_desc) 1685 i = 0; 1686 } 1687 1688 tdata->next_avail_tx_desc = i; 1689 1690 KKASSERT(tdata->num_tx_desc_avail > nsegs); 1691 tdata->num_tx_desc_avail -= nsegs; 1692 1693 tx_buffer->m_head = m_head; 1694 tx_buffer_mapped->map = tx_buffer->map; 1695 tx_buffer->map = map; 1696 1697 if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) { 1698 tdata->tx_nsegs = 0; 1699 1700 /* 1701 * Report Status (RS) is turned on 1702 * every tx_intr_nsegs descriptors. 1703 */ 1704 cmd = E1000_TXD_CMD_RS; 1705 1706 /* 1707 * Keep track of the descriptor, which will 1708 * be written back by hardware. 1709 */ 1710 tdata->tx_dd[tdata->tx_dd_tail] = last; 1711 EMX_INC_TXDD_IDX(tdata->tx_dd_tail); 1712 KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head); 1713 } 1714 1715 /* 1716 * Last Descriptor of Packet needs End Of Packet (EOP) 1717 */ 1718 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd); 1719 1720 /* 1721 * Defer TDT updating, until enough descriptors are setup 1722 */ 1723 *idx = i; 1724 1725 #ifdef EMX_TSS_DEBUG 1726 tdata->tx_pkts++; 1727 #endif 1728 1729 return (0); 1730 } 1731 1732 static void 1733 emx_set_promisc(struct emx_softc *sc) 1734 { 1735 struct ifnet *ifp = &sc->arpcom.ac_if; 1736 uint32_t reg_rctl; 1737 1738 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1739 1740 if (ifp->if_flags & IFF_PROMISC) { 1741 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1742 /* Turn this on if you want to see bad packets */ 1743 if (emx_debug_sbp) 1744 reg_rctl |= E1000_RCTL_SBP; 1745 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1746 } else if (ifp->if_flags & IFF_ALLMULTI) { 1747 reg_rctl |= E1000_RCTL_MPE; 1748 reg_rctl &= ~E1000_RCTL_UPE; 1749 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1750 } 1751 } 1752 1753 static void 1754 emx_disable_promisc(struct emx_softc *sc) 1755 { 1756 uint32_t reg_rctl; 1757 1758 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1759 1760 reg_rctl &= ~E1000_RCTL_UPE; 1761 reg_rctl &= ~E1000_RCTL_MPE; 1762 reg_rctl &= ~E1000_RCTL_SBP; 1763 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1764 } 1765 1766 static void 1767 emx_set_multi(struct emx_softc *sc) 1768 { 1769 struct ifnet *ifp = &sc->arpcom.ac_if; 1770 struct ifmultiaddr *ifma; 1771 uint32_t reg_rctl = 0; 1772 uint8_t *mta; 1773 int mcnt = 0; 1774 1775 mta = sc->mta; 1776 bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX); 1777 1778 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1779 if (ifma->ifma_addr->sa_family != AF_LINK) 1780 continue; 1781 1782 if (mcnt == EMX_MCAST_ADDR_MAX) 1783 break; 1784 1785 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1786 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1787 mcnt++; 1788 } 1789 1790 if (mcnt >= EMX_MCAST_ADDR_MAX) { 1791 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1792 reg_rctl |= E1000_RCTL_MPE; 1793 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1794 } else { 1795 e1000_update_mc_addr_list(&sc->hw, mta, mcnt); 1796 } 1797 } 1798 1799 /* 1800 * This routine checks for link status and updates statistics. 1801 */ 1802 static void 1803 emx_timer(void *xsc) 1804 { 1805 struct emx_softc *sc = xsc; 1806 struct ifnet *ifp = &sc->arpcom.ac_if; 1807 1808 lwkt_serialize_enter(&sc->main_serialize); 1809 1810 emx_update_link_status(sc); 1811 emx_update_stats(sc); 1812 1813 /* Reset LAA into RAR[0] on 82571 */ 1814 if (e1000_get_laa_state_82571(&sc->hw) == TRUE) 1815 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1816 1817 if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 1818 emx_print_hw_stats(sc); 1819 1820 emx_smartspeed(sc); 1821 1822 callout_reset(&sc->timer, hz, emx_timer, sc); 1823 1824 lwkt_serialize_exit(&sc->main_serialize); 1825 } 1826 1827 static void 1828 emx_update_link_status(struct emx_softc *sc) 1829 { 1830 struct e1000_hw *hw = &sc->hw; 1831 struct ifnet *ifp = &sc->arpcom.ac_if; 1832 device_t dev = sc->dev; 1833 uint32_t link_check = 0; 1834 1835 /* Get the cached link value or read phy for real */ 1836 switch (hw->phy.media_type) { 1837 case e1000_media_type_copper: 1838 if (hw->mac.get_link_status) { 1839 /* Do the work to read phy */ 1840 e1000_check_for_link(hw); 1841 link_check = !hw->mac.get_link_status; 1842 if (link_check) /* ESB2 fix */ 1843 e1000_cfg_on_link_up(hw); 1844 } else { 1845 link_check = TRUE; 1846 } 1847 break; 1848 1849 case e1000_media_type_fiber: 1850 e1000_check_for_link(hw); 1851 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 1852 break; 1853 1854 case e1000_media_type_internal_serdes: 1855 e1000_check_for_link(hw); 1856 link_check = sc->hw.mac.serdes_has_link; 1857 break; 1858 1859 case e1000_media_type_unknown: 1860 default: 1861 break; 1862 } 1863 1864 /* Now check for a transition */ 1865 if (link_check && sc->link_active == 0) { 1866 e1000_get_speed_and_duplex(hw, &sc->link_speed, 1867 &sc->link_duplex); 1868 1869 /* 1870 * Check if we should enable/disable SPEED_MODE bit on 1871 * 82571EB/82572EI 1872 */ 1873 if (sc->link_speed != SPEED_1000 && 1874 (hw->mac.type == e1000_82571 || 1875 hw->mac.type == e1000_82572)) { 1876 int tarc0; 1877 1878 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 1879 tarc0 &= ~EMX_TARC_SPEED_MODE; 1880 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 1881 } 1882 if (bootverbose) { 1883 char flowctrl[IFM_ETH_FC_STRLEN]; 1884 1885 e1000_fc2str(hw->fc.current_mode, flowctrl, 1886 sizeof(flowctrl)); 1887 device_printf(dev, "Link is up %d Mbps %s, " 1888 "Flow control: %s\n", 1889 sc->link_speed, 1890 (sc->link_duplex == FULL_DUPLEX) ? 1891 "Full Duplex" : "Half Duplex", 1892 flowctrl); 1893 } 1894 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1895 e1000_force_flowctrl(hw, sc->ifm_flowctrl); 1896 sc->link_active = 1; 1897 sc->smartspeed = 0; 1898 ifp->if_baudrate = sc->link_speed * 1000000; 1899 ifp->if_link_state = LINK_STATE_UP; 1900 if_link_state_change(ifp); 1901 } else if (!link_check && sc->link_active == 1) { 1902 ifp->if_baudrate = sc->link_speed = 0; 1903 sc->link_duplex = 0; 1904 if (bootverbose) 1905 device_printf(dev, "Link is Down\n"); 1906 sc->link_active = 0; 1907 ifp->if_link_state = LINK_STATE_DOWN; 1908 if_link_state_change(ifp); 1909 } 1910 } 1911 1912 static void 1913 emx_stop(struct emx_softc *sc) 1914 { 1915 struct ifnet *ifp = &sc->arpcom.ac_if; 1916 int i; 1917 1918 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1919 1920 emx_disable_intr(sc); 1921 1922 callout_stop(&sc->timer); 1923 1924 ifp->if_flags &= ~IFF_RUNNING; 1925 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1926 struct emx_txdata *tdata = &sc->tx_data[i]; 1927 1928 ifsq_clr_oactive(tdata->ifsq); 1929 ifsq_watchdog_stop(&tdata->tx_watchdog); 1930 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED; 1931 } 1932 1933 /* 1934 * Disable multiple receive queues. 1935 * 1936 * NOTE: 1937 * We should disable multiple receive queues before 1938 * resetting the hardware. 1939 */ 1940 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0); 1941 1942 e1000_reset_hw(&sc->hw); 1943 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1944 1945 for (i = 0; i < sc->tx_ring_cnt; ++i) 1946 emx_free_tx_ring(&sc->tx_data[i]); 1947 for (i = 0; i < sc->rx_ring_cnt; ++i) 1948 emx_free_rx_ring(&sc->rx_data[i]); 1949 } 1950 1951 static int 1952 emx_reset(struct emx_softc *sc) 1953 { 1954 device_t dev = sc->dev; 1955 uint16_t rx_buffer_size; 1956 uint32_t pba; 1957 1958 /* Set up smart power down as default off on newer adapters. */ 1959 if (!emx_smart_pwr_down && 1960 (sc->hw.mac.type == e1000_82571 || 1961 sc->hw.mac.type == e1000_82572)) { 1962 uint16_t phy_tmp = 0; 1963 1964 /* Speed up time to link by disabling smart power down. */ 1965 e1000_read_phy_reg(&sc->hw, 1966 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 1967 phy_tmp &= ~IGP02E1000_PM_SPD; 1968 e1000_write_phy_reg(&sc->hw, 1969 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 1970 } 1971 1972 /* 1973 * Packet Buffer Allocation (PBA) 1974 * Writing PBA sets the receive portion of the buffer 1975 * the remainder is used for the transmit buffer. 1976 */ 1977 switch (sc->hw.mac.type) { 1978 /* Total Packet Buffer on these is 48K */ 1979 case e1000_82571: 1980 case e1000_82572: 1981 case e1000_80003es2lan: 1982 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 1983 break; 1984 1985 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 1986 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 1987 break; 1988 1989 case e1000_82574: 1990 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 1991 break; 1992 1993 case e1000_pch_lpt: 1994 case e1000_pch_spt: 1995 pba = E1000_PBA_26K; 1996 break; 1997 1998 default: 1999 /* Devices before 82547 had a Packet Buffer of 64K. */ 2000 if (sc->hw.mac.max_frame_size > 8192) 2001 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 2002 else 2003 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 2004 } 2005 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba); 2006 2007 /* 2008 * These parameters control the automatic generation (Tx) and 2009 * response (Rx) to Ethernet PAUSE frames. 2010 * - High water mark should allow for at least two frames to be 2011 * received after sending an XOFF. 2012 * - Low water mark works best when it is very near the high water mark. 2013 * This allows the receiver to restart by sending XON when it has 2014 * drained a bit. Here we use an arbitary value of 1500 which will 2015 * restart after one full frame is pulled from the buffer. There 2016 * could be several smaller frames in the buffer and if so they will 2017 * not trigger the XON until their total number reduces the buffer 2018 * by 1500. 2019 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 2020 */ 2021 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10; 2022 2023 sc->hw.fc.high_water = rx_buffer_size - 2024 roundup2(sc->hw.mac.max_frame_size, 1024); 2025 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500; 2026 2027 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME; 2028 sc->hw.fc.send_xon = TRUE; 2029 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl); 2030 2031 /* 2032 * Device specific overrides/settings 2033 */ 2034 if (sc->hw.mac.type == e1000_pch_lpt || 2035 sc->hw.mac.type == e1000_pch_spt) { 2036 sc->hw.fc.high_water = 0x5C20; 2037 sc->hw.fc.low_water = 0x5048; 2038 sc->hw.fc.pause_time = 0x0650; 2039 sc->hw.fc.refresh_time = 0x0400; 2040 /* Jumbos need adjusted PBA */ 2041 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) 2042 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12); 2043 else 2044 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26); 2045 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2046 sc->hw.fc.pause_time = 0xFFFF; 2047 } 2048 2049 /* Issue a global reset */ 2050 e1000_reset_hw(&sc->hw); 2051 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 2052 emx_disable_aspm(sc); 2053 2054 if (e1000_init_hw(&sc->hw) < 0) { 2055 device_printf(dev, "Hardware Initialization Failed\n"); 2056 return (EIO); 2057 } 2058 2059 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 2060 e1000_get_phy_info(&sc->hw); 2061 e1000_check_for_link(&sc->hw); 2062 2063 return (0); 2064 } 2065 2066 static void 2067 emx_setup_ifp(struct emx_softc *sc) 2068 { 2069 struct ifnet *ifp = &sc->arpcom.ac_if; 2070 int i; 2071 2072 if_initname(ifp, device_get_name(sc->dev), 2073 device_get_unit(sc->dev)); 2074 ifp->if_softc = sc; 2075 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2076 ifp->if_init = emx_init; 2077 ifp->if_ioctl = emx_ioctl; 2078 ifp->if_start = emx_start; 2079 #ifdef IFPOLL_ENABLE 2080 ifp->if_npoll = emx_npoll; 2081 #endif 2082 ifp->if_serialize = emx_serialize; 2083 ifp->if_deserialize = emx_deserialize; 2084 ifp->if_tryserialize = emx_tryserialize; 2085 #ifdef INVARIANTS 2086 ifp->if_serialize_assert = emx_serialize_assert; 2087 #endif 2088 2089 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc; 2090 2091 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1); 2092 ifq_set_ready(&ifp->if_snd); 2093 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 2094 2095 ifp->if_mapsubq = ifq_mapsubq_mask; 2096 ifq_set_subq_mask(&ifp->if_snd, 0); 2097 2098 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2099 2100 ifp->if_capabilities = IFCAP_HWCSUM | 2101 IFCAP_VLAN_HWTAGGING | 2102 IFCAP_VLAN_MTU | 2103 IFCAP_TSO; 2104 if (sc->rx_ring_cnt > 1) 2105 ifp->if_capabilities |= IFCAP_RSS; 2106 ifp->if_capenable = ifp->if_capabilities; 2107 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2108 2109 /* 2110 * Tell the upper layer(s) we support long frames. 2111 */ 2112 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2113 2114 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2115 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2116 struct emx_txdata *tdata = &sc->tx_data[i]; 2117 2118 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2119 ifsq_set_priv(ifsq, tdata); 2120 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2121 tdata->ifsq = ifsq; 2122 2123 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2124 } 2125 2126 /* 2127 * Specify the media types supported by this sc and register 2128 * callbacks to update media and link information 2129 */ 2130 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2131 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2132 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2133 0, NULL); 2134 } else { 2135 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2136 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2137 0, NULL); 2138 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2139 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2140 0, NULL); 2141 if (sc->hw.phy.type != e1000_phy_ife) { 2142 ifmedia_add(&sc->media, 2143 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2144 } 2145 } 2146 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2147 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl); 2148 } 2149 2150 /* 2151 * Workaround for SmartSpeed on 82541 and 82547 controllers 2152 */ 2153 static void 2154 emx_smartspeed(struct emx_softc *sc) 2155 { 2156 uint16_t phy_tmp; 2157 2158 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2159 sc->hw.mac.autoneg == 0 || 2160 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2161 return; 2162 2163 if (sc->smartspeed == 0) { 2164 /* 2165 * If Master/Slave config fault is asserted twice, 2166 * we assume back-to-back 2167 */ 2168 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2169 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2170 return; 2171 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2172 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2173 e1000_read_phy_reg(&sc->hw, 2174 PHY_1000T_CTRL, &phy_tmp); 2175 if (phy_tmp & CR_1000T_MS_ENABLE) { 2176 phy_tmp &= ~CR_1000T_MS_ENABLE; 2177 e1000_write_phy_reg(&sc->hw, 2178 PHY_1000T_CTRL, phy_tmp); 2179 sc->smartspeed++; 2180 if (sc->hw.mac.autoneg && 2181 !e1000_phy_setup_autoneg(&sc->hw) && 2182 !e1000_read_phy_reg(&sc->hw, 2183 PHY_CONTROL, &phy_tmp)) { 2184 phy_tmp |= MII_CR_AUTO_NEG_EN | 2185 MII_CR_RESTART_AUTO_NEG; 2186 e1000_write_phy_reg(&sc->hw, 2187 PHY_CONTROL, phy_tmp); 2188 } 2189 } 2190 } 2191 return; 2192 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2193 /* If still no link, perhaps using 2/3 pair cable */ 2194 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2195 phy_tmp |= CR_1000T_MS_ENABLE; 2196 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2197 if (sc->hw.mac.autoneg && 2198 !e1000_phy_setup_autoneg(&sc->hw) && 2199 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2200 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2201 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2202 } 2203 } 2204 2205 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2206 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2207 sc->smartspeed = 0; 2208 } 2209 2210 static int 2211 emx_create_tx_ring(struct emx_txdata *tdata) 2212 { 2213 device_t dev = tdata->sc->dev; 2214 struct emx_txbuf *tx_buffer; 2215 int error, i, tsize, ntxd; 2216 2217 /* 2218 * Validate number of transmit descriptors. It must not exceed 2219 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2220 */ 2221 ntxd = device_getenv_int(dev, "txd", emx_txd); 2222 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2223 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2224 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2225 EMX_DEFAULT_TXD, ntxd); 2226 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2227 } else { 2228 tdata->num_tx_desc = ntxd; 2229 } 2230 2231 /* 2232 * Allocate Transmit Descriptor ring 2233 */ 2234 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2235 EMX_DBA_ALIGN); 2236 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2237 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2238 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2239 &tdata->tx_desc_paddr); 2240 if (tdata->tx_desc_base == NULL) { 2241 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2242 return ENOMEM; 2243 } 2244 2245 tsize = __VM_CACHELINE_ALIGN( 2246 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2247 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO); 2248 2249 /* 2250 * Create DMA tags for tx buffers 2251 */ 2252 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2253 1, 0, /* alignment, bounds */ 2254 BUS_SPACE_MAXADDR, /* lowaddr */ 2255 BUS_SPACE_MAXADDR, /* highaddr */ 2256 NULL, NULL, /* filter, filterarg */ 2257 EMX_TSO_SIZE, /* maxsize */ 2258 EMX_MAX_SCATTER, /* nsegments */ 2259 EMX_MAX_SEGSIZE, /* maxsegsize */ 2260 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2261 BUS_DMA_ONEBPAGE, /* flags */ 2262 &tdata->txtag); 2263 if (error) { 2264 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2265 kfree(tdata->tx_buf, M_DEVBUF); 2266 tdata->tx_buf = NULL; 2267 return error; 2268 } 2269 2270 /* 2271 * Create DMA maps for tx buffers 2272 */ 2273 for (i = 0; i < tdata->num_tx_desc; i++) { 2274 tx_buffer = &tdata->tx_buf[i]; 2275 2276 error = bus_dmamap_create(tdata->txtag, 2277 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2278 &tx_buffer->map); 2279 if (error) { 2280 device_printf(dev, "Unable to create TX DMA map\n"); 2281 emx_destroy_tx_ring(tdata, i); 2282 return error; 2283 } 2284 } 2285 2286 /* 2287 * Setup TX parameters 2288 */ 2289 tdata->spare_tx_desc = EMX_TX_SPARE; 2290 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2291 2292 /* 2293 * Keep following relationship between spare_tx_desc, oact_tx_desc 2294 * and tx_intr_nsegs: 2295 * (spare_tx_desc + EMX_TX_RESERVED) <= 2296 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2297 */ 2298 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2299 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2300 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2301 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2302 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2303 2304 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2305 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2306 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2307 2308 /* 2309 * Pullup extra 4bytes into the first data segment for TSO, see: 2310 * 82571/82572 specification update errata #7 2311 * 2312 * Same applies to I217 (and maybe I218 and I219). 2313 * 2314 * NOTE: 2315 * 4bytes instead of 2bytes, which are mentioned in the errata, 2316 * are pulled; mainly to keep rest of the data properly aligned. 2317 */ 2318 if (tdata->sc->hw.mac.type == e1000_82571 || 2319 tdata->sc->hw.mac.type == e1000_82572 || 2320 tdata->sc->hw.mac.type == e1000_pch_lpt || 2321 tdata->sc->hw.mac.type == e1000_pch_spt) 2322 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2323 2324 return (0); 2325 } 2326 2327 static void 2328 emx_init_tx_ring(struct emx_txdata *tdata) 2329 { 2330 /* Clear the old ring contents */ 2331 bzero(tdata->tx_desc_base, 2332 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2333 2334 /* Reset state */ 2335 tdata->next_avail_tx_desc = 0; 2336 tdata->next_tx_to_clean = 0; 2337 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2338 2339 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2340 if (tdata->sc->tx_ring_inuse > 1) { 2341 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2342 if (bootverbose) { 2343 if_printf(&tdata->sc->arpcom.ac_if, 2344 "TX %d force ctx setup\n", tdata->idx); 2345 } 2346 } 2347 } 2348 2349 static void 2350 emx_init_tx_unit(struct emx_softc *sc) 2351 { 2352 uint32_t tctl, tarc, tipg = 0, txdctl; 2353 int i; 2354 2355 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2356 struct emx_txdata *tdata = &sc->tx_data[i]; 2357 uint64_t bus_addr; 2358 2359 /* Setup the Base and Length of the Tx Descriptor Ring */ 2360 bus_addr = tdata->tx_desc_paddr; 2361 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2362 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2363 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2364 (uint32_t)(bus_addr >> 32)); 2365 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2366 (uint32_t)bus_addr); 2367 /* Setup the HW Tx Head and Tail descriptor pointers */ 2368 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2369 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2370 } 2371 2372 /* Set the default values for the Tx Inter Packet Gap timer */ 2373 switch (sc->hw.mac.type) { 2374 case e1000_80003es2lan: 2375 tipg = DEFAULT_82543_TIPG_IPGR1; 2376 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2377 E1000_TIPG_IPGR2_SHIFT; 2378 break; 2379 2380 default: 2381 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2382 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2383 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2384 else 2385 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2386 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2387 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2388 break; 2389 } 2390 2391 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2392 2393 /* NOTE: 0 is not allowed for TIDV */ 2394 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2395 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2396 2397 /* 2398 * Errata workaround (obtained from Linux). This is necessary 2399 * to make multiple TX queues work on 82574. 2400 * XXX can't find it in any published errata though. 2401 */ 2402 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0)); 2403 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl); 2404 2405 if (sc->hw.mac.type == e1000_82571 || 2406 sc->hw.mac.type == e1000_82572) { 2407 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2408 tarc |= EMX_TARC_SPEED_MODE; 2409 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2410 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2411 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2412 tarc |= 1; 2413 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2414 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2415 tarc |= 1; 2416 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2417 } 2418 2419 /* Program the Transmit Control Register */ 2420 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2421 tctl &= ~E1000_TCTL_CT; 2422 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2423 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2424 tctl |= E1000_TCTL_MULR; 2425 2426 /* This write will effectively turn on the transmit unit. */ 2427 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2428 2429 if (sc->hw.mac.type == e1000_82571 || 2430 sc->hw.mac.type == e1000_82572 || 2431 sc->hw.mac.type == e1000_80003es2lan) { 2432 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2433 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2434 tarc &= ~(1 << 28); 2435 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2436 } 2437 2438 if (sc->tx_ring_inuse > 1) { 2439 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2440 tarc &= ~EMX_TARC_COUNT_MASK; 2441 tarc |= 1; 2442 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2443 2444 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2445 tarc &= ~EMX_TARC_COUNT_MASK; 2446 tarc |= 1; 2447 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2448 } 2449 } 2450 2451 static void 2452 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc) 2453 { 2454 struct emx_txbuf *tx_buffer; 2455 int i; 2456 2457 /* Free Transmit Descriptor ring */ 2458 if (tdata->tx_desc_base) { 2459 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap); 2460 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base, 2461 tdata->tx_desc_dmap); 2462 bus_dma_tag_destroy(tdata->tx_desc_dtag); 2463 2464 tdata->tx_desc_base = NULL; 2465 } 2466 2467 if (tdata->tx_buf == NULL) 2468 return; 2469 2470 for (i = 0; i < ndesc; i++) { 2471 tx_buffer = &tdata->tx_buf[i]; 2472 2473 KKASSERT(tx_buffer->m_head == NULL); 2474 bus_dmamap_destroy(tdata->txtag, tx_buffer->map); 2475 } 2476 bus_dma_tag_destroy(tdata->txtag); 2477 2478 kfree(tdata->tx_buf, M_DEVBUF); 2479 tdata->tx_buf = NULL; 2480 } 2481 2482 /* 2483 * The offload context needs to be set when we transfer the first 2484 * packet of a particular protocol (TCP/UDP). This routine has been 2485 * enhanced to deal with inserted VLAN headers. 2486 * 2487 * If the new packet's ether header length, ip header length and 2488 * csum offloading type are same as the previous packet, we should 2489 * avoid allocating a new csum context descriptor; mainly to take 2490 * advantage of the pipeline effect of the TX data read request. 2491 * 2492 * This function returns number of TX descrptors allocated for 2493 * csum context. 2494 */ 2495 static int 2496 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp, 2497 uint32_t *txd_upper, uint32_t *txd_lower) 2498 { 2499 struct e1000_context_desc *TXD; 2500 int curr_txd, ehdrlen, csum_flags; 2501 uint32_t cmd, hdr_len, ip_hlen; 2502 2503 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2504 ip_hlen = mp->m_pkthdr.csum_iphlen; 2505 ehdrlen = mp->m_pkthdr.csum_lhlen; 2506 2507 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 2508 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen && 2509 tdata->csum_flags == csum_flags) { 2510 /* 2511 * Same csum offload context as the previous packets; 2512 * just return. 2513 */ 2514 *txd_upper = tdata->csum_txd_upper; 2515 *txd_lower = tdata->csum_txd_lower; 2516 return 0; 2517 } 2518 2519 /* 2520 * Setup a new csum offload context. 2521 */ 2522 2523 curr_txd = tdata->next_avail_tx_desc; 2524 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 2525 2526 cmd = 0; 2527 2528 /* Setup of IP header checksum. */ 2529 if (csum_flags & CSUM_IP) { 2530 /* 2531 * Start offset for header checksum calculation. 2532 * End offset for header checksum calculation. 2533 * Offset of place to put the checksum. 2534 */ 2535 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2536 TXD->lower_setup.ip_fields.ipcse = 2537 htole16(ehdrlen + ip_hlen - 1); 2538 TXD->lower_setup.ip_fields.ipcso = 2539 ehdrlen + offsetof(struct ip, ip_sum); 2540 cmd |= E1000_TXD_CMD_IP; 2541 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2542 } 2543 hdr_len = ehdrlen + ip_hlen; 2544 2545 if (csum_flags & CSUM_TCP) { 2546 /* 2547 * Start offset for payload checksum calculation. 2548 * End offset for payload checksum calculation. 2549 * Offset of place to put the checksum. 2550 */ 2551 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2552 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2553 TXD->upper_setup.tcp_fields.tucso = 2554 hdr_len + offsetof(struct tcphdr, th_sum); 2555 cmd |= E1000_TXD_CMD_TCP; 2556 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2557 } else if (csum_flags & CSUM_UDP) { 2558 /* 2559 * Start offset for header checksum calculation. 2560 * End offset for header checksum calculation. 2561 * Offset of place to put the checksum. 2562 */ 2563 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2564 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2565 TXD->upper_setup.tcp_fields.tucso = 2566 hdr_len + offsetof(struct udphdr, uh_sum); 2567 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2568 } 2569 2570 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2571 E1000_TXD_DTYP_D; /* Data descr */ 2572 2573 /* Save the information for this csum offloading context */ 2574 tdata->csum_lhlen = ehdrlen; 2575 tdata->csum_iphlen = ip_hlen; 2576 tdata->csum_flags = csum_flags; 2577 tdata->csum_txd_upper = *txd_upper; 2578 tdata->csum_txd_lower = *txd_lower; 2579 2580 TXD->tcp_seg_setup.data = htole32(0); 2581 TXD->cmd_and_length = 2582 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2583 2584 if (++curr_txd == tdata->num_tx_desc) 2585 curr_txd = 0; 2586 2587 KKASSERT(tdata->num_tx_desc_avail > 0); 2588 tdata->num_tx_desc_avail--; 2589 2590 tdata->next_avail_tx_desc = curr_txd; 2591 return 1; 2592 } 2593 2594 static void 2595 emx_txeof(struct emx_txdata *tdata) 2596 { 2597 struct emx_txbuf *tx_buffer; 2598 int first, num_avail; 2599 2600 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2601 return; 2602 2603 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2604 return; 2605 2606 num_avail = tdata->num_tx_desc_avail; 2607 first = tdata->next_tx_to_clean; 2608 2609 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2610 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2611 struct e1000_tx_desc *tx_desc; 2612 2613 tx_desc = &tdata->tx_desc_base[dd_idx]; 2614 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2615 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2616 2617 if (++dd_idx == tdata->num_tx_desc) 2618 dd_idx = 0; 2619 2620 while (first != dd_idx) { 2621 logif(pkt_txclean); 2622 2623 num_avail++; 2624 2625 tx_buffer = &tdata->tx_buf[first]; 2626 if (tx_buffer->m_head) { 2627 bus_dmamap_unload(tdata->txtag, 2628 tx_buffer->map); 2629 m_freem(tx_buffer->m_head); 2630 tx_buffer->m_head = NULL; 2631 } 2632 2633 if (++first == tdata->num_tx_desc) 2634 first = 0; 2635 } 2636 } else { 2637 break; 2638 } 2639 } 2640 tdata->next_tx_to_clean = first; 2641 tdata->num_tx_desc_avail = num_avail; 2642 2643 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2644 tdata->tx_dd_head = 0; 2645 tdata->tx_dd_tail = 0; 2646 } 2647 2648 if (!EMX_IS_OACTIVE(tdata)) { 2649 ifsq_clr_oactive(tdata->ifsq); 2650 2651 /* All clean, turn off the timer */ 2652 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2653 tdata->tx_watchdog.wd_timer = 0; 2654 } 2655 } 2656 2657 static void 2658 emx_tx_collect(struct emx_txdata *tdata) 2659 { 2660 struct emx_txbuf *tx_buffer; 2661 int tdh, first, num_avail, dd_idx = -1; 2662 2663 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2664 return; 2665 2666 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx)); 2667 if (tdh == tdata->next_tx_to_clean) 2668 return; 2669 2670 if (tdata->tx_dd_head != tdata->tx_dd_tail) 2671 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2672 2673 num_avail = tdata->num_tx_desc_avail; 2674 first = tdata->next_tx_to_clean; 2675 2676 while (first != tdh) { 2677 logif(pkt_txclean); 2678 2679 num_avail++; 2680 2681 tx_buffer = &tdata->tx_buf[first]; 2682 if (tx_buffer->m_head) { 2683 bus_dmamap_unload(tdata->txtag, 2684 tx_buffer->map); 2685 m_freem(tx_buffer->m_head); 2686 tx_buffer->m_head = NULL; 2687 } 2688 2689 if (first == dd_idx) { 2690 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2691 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2692 tdata->tx_dd_head = 0; 2693 tdata->tx_dd_tail = 0; 2694 dd_idx = -1; 2695 } else { 2696 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2697 } 2698 } 2699 2700 if (++first == tdata->num_tx_desc) 2701 first = 0; 2702 } 2703 tdata->next_tx_to_clean = first; 2704 tdata->num_tx_desc_avail = num_avail; 2705 2706 if (!EMX_IS_OACTIVE(tdata)) { 2707 ifsq_clr_oactive(tdata->ifsq); 2708 2709 /* All clean, turn off the timer */ 2710 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2711 tdata->tx_watchdog.wd_timer = 0; 2712 } 2713 } 2714 2715 /* 2716 * When Link is lost sometimes there is work still in the TX ring 2717 * which will result in a watchdog, rather than allow that do an 2718 * attempted cleanup and then reinit here. Note that this has been 2719 * seens mostly with fiber adapters. 2720 */ 2721 static void 2722 emx_tx_purge(struct emx_softc *sc) 2723 { 2724 int i; 2725 2726 if (sc->link_active) 2727 return; 2728 2729 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2730 struct emx_txdata *tdata = &sc->tx_data[i]; 2731 2732 if (tdata->tx_watchdog.wd_timer) { 2733 emx_tx_collect(tdata); 2734 if (tdata->tx_watchdog.wd_timer) { 2735 if_printf(&sc->arpcom.ac_if, 2736 "Link lost, TX pending, reinit\n"); 2737 emx_init(sc); 2738 return; 2739 } 2740 } 2741 } 2742 } 2743 2744 static int 2745 emx_newbuf(struct emx_rxdata *rdata, int i, int init) 2746 { 2747 struct mbuf *m; 2748 bus_dma_segment_t seg; 2749 bus_dmamap_t map; 2750 struct emx_rxbuf *rx_buffer; 2751 int error, nseg; 2752 2753 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2754 if (m == NULL) { 2755 if (init) { 2756 if_printf(&rdata->sc->arpcom.ac_if, 2757 "Unable to allocate RX mbuf\n"); 2758 } 2759 return (ENOBUFS); 2760 } 2761 m->m_len = m->m_pkthdr.len = MCLBYTES; 2762 2763 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 2764 m_adj(m, ETHER_ALIGN); 2765 2766 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2767 rdata->rx_sparemap, m, 2768 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2769 if (error) { 2770 m_freem(m); 2771 if (init) { 2772 if_printf(&rdata->sc->arpcom.ac_if, 2773 "Unable to load RX mbuf\n"); 2774 } 2775 return (error); 2776 } 2777 2778 rx_buffer = &rdata->rx_buf[i]; 2779 if (rx_buffer->m_head != NULL) 2780 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2781 2782 map = rx_buffer->map; 2783 rx_buffer->map = rdata->rx_sparemap; 2784 rdata->rx_sparemap = map; 2785 2786 rx_buffer->m_head = m; 2787 rx_buffer->paddr = seg.ds_addr; 2788 2789 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2790 return (0); 2791 } 2792 2793 static int 2794 emx_create_rx_ring(struct emx_rxdata *rdata) 2795 { 2796 device_t dev = rdata->sc->dev; 2797 struct emx_rxbuf *rx_buffer; 2798 int i, error, rsize, nrxd; 2799 2800 /* 2801 * Validate number of receive descriptors. It must not exceed 2802 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2803 */ 2804 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2805 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2806 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2807 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2808 EMX_DEFAULT_RXD, nrxd); 2809 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2810 } else { 2811 rdata->num_rx_desc = nrxd; 2812 } 2813 2814 /* 2815 * Allocate Receive Descriptor ring 2816 */ 2817 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2818 EMX_DBA_ALIGN); 2819 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2820 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2821 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2822 &rdata->rx_desc_paddr); 2823 if (rdata->rx_desc == NULL) { 2824 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2825 return ENOMEM; 2826 } 2827 2828 rsize = __VM_CACHELINE_ALIGN( 2829 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2830 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2831 2832 /* 2833 * Create DMA tag for rx buffers 2834 */ 2835 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2836 1, 0, /* alignment, bounds */ 2837 BUS_SPACE_MAXADDR, /* lowaddr */ 2838 BUS_SPACE_MAXADDR, /* highaddr */ 2839 NULL, NULL, /* filter, filterarg */ 2840 MCLBYTES, /* maxsize */ 2841 1, /* nsegments */ 2842 MCLBYTES, /* maxsegsize */ 2843 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2844 &rdata->rxtag); 2845 if (error) { 2846 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2847 kfree(rdata->rx_buf, M_DEVBUF); 2848 rdata->rx_buf = NULL; 2849 return error; 2850 } 2851 2852 /* 2853 * Create spare DMA map for rx buffers 2854 */ 2855 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2856 &rdata->rx_sparemap); 2857 if (error) { 2858 device_printf(dev, "Unable to create spare RX DMA map\n"); 2859 bus_dma_tag_destroy(rdata->rxtag); 2860 kfree(rdata->rx_buf, M_DEVBUF); 2861 rdata->rx_buf = NULL; 2862 return error; 2863 } 2864 2865 /* 2866 * Create DMA maps for rx buffers 2867 */ 2868 for (i = 0; i < rdata->num_rx_desc; i++) { 2869 rx_buffer = &rdata->rx_buf[i]; 2870 2871 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2872 &rx_buffer->map); 2873 if (error) { 2874 device_printf(dev, "Unable to create RX DMA map\n"); 2875 emx_destroy_rx_ring(rdata, i); 2876 return error; 2877 } 2878 } 2879 return (0); 2880 } 2881 2882 static void 2883 emx_free_rx_ring(struct emx_rxdata *rdata) 2884 { 2885 int i; 2886 2887 for (i = 0; i < rdata->num_rx_desc; i++) { 2888 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 2889 2890 if (rx_buffer->m_head != NULL) { 2891 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2892 m_freem(rx_buffer->m_head); 2893 rx_buffer->m_head = NULL; 2894 } 2895 } 2896 2897 if (rdata->fmp != NULL) 2898 m_freem(rdata->fmp); 2899 rdata->fmp = NULL; 2900 rdata->lmp = NULL; 2901 } 2902 2903 static void 2904 emx_free_tx_ring(struct emx_txdata *tdata) 2905 { 2906 int i; 2907 2908 for (i = 0; i < tdata->num_tx_desc; i++) { 2909 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i]; 2910 2911 if (tx_buffer->m_head != NULL) { 2912 bus_dmamap_unload(tdata->txtag, tx_buffer->map); 2913 m_freem(tx_buffer->m_head); 2914 tx_buffer->m_head = NULL; 2915 } 2916 } 2917 2918 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX; 2919 2920 tdata->csum_flags = 0; 2921 tdata->csum_lhlen = 0; 2922 tdata->csum_iphlen = 0; 2923 tdata->csum_thlen = 0; 2924 tdata->csum_mss = 0; 2925 tdata->csum_pktlen = 0; 2926 2927 tdata->tx_dd_head = 0; 2928 tdata->tx_dd_tail = 0; 2929 tdata->tx_nsegs = 0; 2930 } 2931 2932 static int 2933 emx_init_rx_ring(struct emx_rxdata *rdata) 2934 { 2935 int i, error; 2936 2937 /* Reset descriptor ring */ 2938 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 2939 2940 /* Allocate new ones. */ 2941 for (i = 0; i < rdata->num_rx_desc; i++) { 2942 error = emx_newbuf(rdata, i, 1); 2943 if (error) 2944 return (error); 2945 } 2946 2947 /* Setup our descriptor pointers */ 2948 rdata->next_rx_desc_to_check = 0; 2949 2950 return (0); 2951 } 2952 2953 static void 2954 emx_init_rx_unit(struct emx_softc *sc) 2955 { 2956 struct ifnet *ifp = &sc->arpcom.ac_if; 2957 uint64_t bus_addr; 2958 uint32_t rctl, itr, rfctl; 2959 int i; 2960 2961 /* 2962 * Make sure receives are disabled while setting 2963 * up the descriptor ring 2964 */ 2965 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 2966 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2967 2968 /* 2969 * Set the interrupt throttling rate. Value is calculated 2970 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 2971 */ 2972 if (sc->int_throttle_ceil) 2973 itr = 1000000000 / 256 / sc->int_throttle_ceil; 2974 else 2975 itr = 0; 2976 emx_set_itr(sc, itr); 2977 2978 /* Use extended RX descriptor */ 2979 rfctl = E1000_RFCTL_EXTEN; 2980 2981 /* Disable accelerated ackknowledge */ 2982 if (sc->hw.mac.type == e1000_82574) 2983 rfctl |= E1000_RFCTL_ACK_DIS; 2984 2985 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 2986 2987 /* 2988 * Receive Checksum Offload for TCP and UDP 2989 * 2990 * Checksum offloading is also enabled if multiple receive 2991 * queue is to be supported, since we need it to figure out 2992 * packet type. 2993 */ 2994 if ((ifp->if_capenable & IFCAP_RXCSUM) || 2995 sc->rx_ring_cnt > 1) { 2996 uint32_t rxcsum; 2997 2998 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2999 3000 /* 3001 * NOTE: 3002 * PCSD must be enabled to enable multiple 3003 * receive queues. 3004 */ 3005 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 3006 E1000_RXCSUM_PCSD; 3007 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 3008 } 3009 3010 /* 3011 * Configure multiple receive queue (RSS) 3012 */ 3013 if (sc->rx_ring_cnt > 1) { 3014 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE]; 3015 uint32_t reta; 3016 3017 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING, 3018 ("invalid number of RX ring (%d)", sc->rx_ring_cnt)); 3019 3020 /* 3021 * NOTE: 3022 * When we reach here, RSS has already been disabled 3023 * in emx_stop(), so we could safely configure RSS key 3024 * and redirect table. 3025 */ 3026 3027 /* 3028 * Configure RSS key 3029 */ 3030 toeplitz_get_key(key, sizeof(key)); 3031 for (i = 0; i < EMX_NRSSRK; ++i) { 3032 uint32_t rssrk; 3033 3034 rssrk = EMX_RSSRK_VAL(key, i); 3035 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 3036 3037 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk); 3038 } 3039 3040 /* 3041 * Configure RSS redirect table in following fashion: 3042 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 3043 */ 3044 reta = 0; 3045 for (i = 0; i < EMX_RETA_SIZE; ++i) { 3046 uint32_t q; 3047 3048 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT; 3049 reta |= q << (8 * i); 3050 } 3051 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 3052 3053 for (i = 0; i < EMX_NRETA; ++i) 3054 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta); 3055 3056 /* 3057 * Enable multiple receive queues. 3058 * Enable IPv4 RSS standard hash functions. 3059 * Disable RSS interrupt. 3060 */ 3061 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 3062 E1000_MRQC_ENABLE_RSS_2Q | 3063 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3064 E1000_MRQC_RSS_FIELD_IPV4); 3065 } 3066 3067 /* 3068 * XXX TEMPORARY WORKAROUND: on some systems with 82573 3069 * long latencies are observed, like Lenovo X60. This 3070 * change eliminates the problem, but since having positive 3071 * values in RDTR is a known source of problems on other 3072 * platforms another solution is being sought. 3073 */ 3074 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 3075 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 3076 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 3077 } 3078 3079 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3080 struct emx_rxdata *rdata = &sc->rx_data[i]; 3081 3082 /* 3083 * Setup the Base and Length of the Rx Descriptor Ring 3084 */ 3085 bus_addr = rdata->rx_desc_paddr; 3086 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 3087 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 3088 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 3089 (uint32_t)(bus_addr >> 32)); 3090 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 3091 (uint32_t)bus_addr); 3092 3093 /* 3094 * Setup the HW Rx Head and Tail Descriptor Pointers 3095 */ 3096 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 3097 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 3098 sc->rx_data[i].num_rx_desc - 1); 3099 } 3100 3101 if (sc->hw.mac.type >= e1000_pch2lan) { 3102 if (ifp->if_mtu > ETHERMTU) 3103 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE); 3104 else 3105 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE); 3106 } 3107 3108 /* Setup the Receive Control Register */ 3109 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3110 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3111 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 3112 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3113 3114 /* Make sure VLAN Filters are off */ 3115 rctl &= ~E1000_RCTL_VFE; 3116 3117 /* Don't store bad paket */ 3118 rctl &= ~E1000_RCTL_SBP; 3119 3120 /* MCLBYTES */ 3121 rctl |= E1000_RCTL_SZ_2048; 3122 3123 if (ifp->if_mtu > ETHERMTU) 3124 rctl |= E1000_RCTL_LPE; 3125 else 3126 rctl &= ~E1000_RCTL_LPE; 3127 3128 /* Enable Receives */ 3129 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 3130 } 3131 3132 static void 3133 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc) 3134 { 3135 struct emx_rxbuf *rx_buffer; 3136 int i; 3137 3138 /* Free Receive Descriptor ring */ 3139 if (rdata->rx_desc) { 3140 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 3141 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 3142 rdata->rx_desc_dmap); 3143 bus_dma_tag_destroy(rdata->rx_desc_dtag); 3144 3145 rdata->rx_desc = NULL; 3146 } 3147 3148 if (rdata->rx_buf == NULL) 3149 return; 3150 3151 for (i = 0; i < ndesc; i++) { 3152 rx_buffer = &rdata->rx_buf[i]; 3153 3154 KKASSERT(rx_buffer->m_head == NULL); 3155 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 3156 } 3157 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 3158 bus_dma_tag_destroy(rdata->rxtag); 3159 3160 kfree(rdata->rx_buf, M_DEVBUF); 3161 rdata->rx_buf = NULL; 3162 } 3163 3164 static void 3165 emx_rxeof(struct emx_rxdata *rdata, int count) 3166 { 3167 struct ifnet *ifp = &rdata->sc->arpcom.ac_if; 3168 uint32_t staterr; 3169 emx_rxdesc_t *current_desc; 3170 struct mbuf *mp; 3171 int i, cpuid = mycpuid; 3172 3173 i = rdata->next_rx_desc_to_check; 3174 current_desc = &rdata->rx_desc[i]; 3175 staterr = le32toh(current_desc->rxd_staterr); 3176 3177 if (!(staterr & E1000_RXD_STAT_DD)) 3178 return; 3179 3180 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 3181 struct pktinfo *pi = NULL, pi0; 3182 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 3183 struct mbuf *m = NULL; 3184 int eop, len; 3185 3186 logif(pkt_receive); 3187 3188 mp = rx_buf->m_head; 3189 3190 /* 3191 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3192 * needs to access the last received byte in the mbuf. 3193 */ 3194 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 3195 BUS_DMASYNC_POSTREAD); 3196 3197 len = le16toh(current_desc->rxd_length); 3198 if (staterr & E1000_RXD_STAT_EOP) { 3199 count--; 3200 eop = 1; 3201 } else { 3202 eop = 0; 3203 } 3204 3205 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3206 uint16_t vlan = 0; 3207 uint32_t mrq, rss_hash; 3208 3209 /* 3210 * Save several necessary information, 3211 * before emx_newbuf() destroy it. 3212 */ 3213 if ((staterr & E1000_RXD_STAT_VP) && eop) 3214 vlan = le16toh(current_desc->rxd_vlan); 3215 3216 mrq = le32toh(current_desc->rxd_mrq); 3217 rss_hash = le32toh(current_desc->rxd_rss); 3218 3219 EMX_RSS_DPRINTF(rdata->sc, 10, 3220 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 3221 rdata->idx, mrq, rss_hash); 3222 3223 if (emx_newbuf(rdata, i, 0) != 0) { 3224 IFNET_STAT_INC(ifp, iqdrops, 1); 3225 goto discard; 3226 } 3227 3228 /* Assign correct length to the current fragment */ 3229 mp->m_len = len; 3230 3231 if (rdata->fmp == NULL) { 3232 mp->m_pkthdr.len = len; 3233 rdata->fmp = mp; /* Store the first mbuf */ 3234 rdata->lmp = mp; 3235 } else { 3236 /* 3237 * Chain mbuf's together 3238 */ 3239 rdata->lmp->m_next = mp; 3240 rdata->lmp = rdata->lmp->m_next; 3241 rdata->fmp->m_pkthdr.len += len; 3242 } 3243 3244 if (eop) { 3245 rdata->fmp->m_pkthdr.rcvif = ifp; 3246 IFNET_STAT_INC(ifp, ipackets, 1); 3247 3248 if (ifp->if_capenable & IFCAP_RXCSUM) 3249 emx_rxcsum(staterr, rdata->fmp); 3250 3251 if (staterr & E1000_RXD_STAT_VP) { 3252 rdata->fmp->m_pkthdr.ether_vlantag = 3253 vlan; 3254 rdata->fmp->m_flags |= M_VLANTAG; 3255 } 3256 m = rdata->fmp; 3257 rdata->fmp = NULL; 3258 rdata->lmp = NULL; 3259 3260 if (ifp->if_capenable & IFCAP_RSS) { 3261 pi = emx_rssinfo(m, &pi0, mrq, 3262 rss_hash, staterr); 3263 } 3264 #ifdef EMX_RSS_DEBUG 3265 rdata->rx_pkts++; 3266 #endif 3267 } 3268 } else { 3269 IFNET_STAT_INC(ifp, ierrors, 1); 3270 discard: 3271 emx_setup_rxdesc(current_desc, rx_buf); 3272 if (rdata->fmp != NULL) { 3273 m_freem(rdata->fmp); 3274 rdata->fmp = NULL; 3275 rdata->lmp = NULL; 3276 } 3277 m = NULL; 3278 } 3279 3280 if (m != NULL) 3281 ifp->if_input(ifp, m, pi, cpuid); 3282 3283 /* Advance our pointers to the next descriptor. */ 3284 if (++i == rdata->num_rx_desc) 3285 i = 0; 3286 3287 current_desc = &rdata->rx_desc[i]; 3288 staterr = le32toh(current_desc->rxd_staterr); 3289 } 3290 rdata->next_rx_desc_to_check = i; 3291 3292 /* Advance the E1000's Receive Queue "Tail Pointer". */ 3293 if (--i < 0) 3294 i = rdata->num_rx_desc - 1; 3295 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3296 } 3297 3298 static void 3299 emx_enable_intr(struct emx_softc *sc) 3300 { 3301 uint32_t ims_mask = IMS_ENABLE_MASK; 3302 3303 lwkt_serialize_handler_enable(&sc->main_serialize); 3304 3305 #if 0 3306 if (sc->hw.mac.type == e1000_82574) { 3307 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3308 ims_mask |= EM_MSIX_MASK; 3309 } 3310 #endif 3311 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3312 } 3313 3314 static void 3315 emx_disable_intr(struct emx_softc *sc) 3316 { 3317 if (sc->hw.mac.type == e1000_82574) 3318 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3319 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3320 3321 lwkt_serialize_handler_disable(&sc->main_serialize); 3322 } 3323 3324 /* 3325 * Bit of a misnomer, what this really means is 3326 * to enable OS management of the system... aka 3327 * to disable special hardware management features 3328 */ 3329 static void 3330 emx_get_mgmt(struct emx_softc *sc) 3331 { 3332 /* A shared code workaround */ 3333 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3334 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3335 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3336 3337 /* disable hardware interception of ARP */ 3338 manc &= ~(E1000_MANC_ARP_EN); 3339 3340 /* enable receiving management packets to the host */ 3341 manc |= E1000_MANC_EN_MNG2HOST; 3342 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3343 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3344 manc2h |= E1000_MNG2HOST_PORT_623; 3345 manc2h |= E1000_MNG2HOST_PORT_664; 3346 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3347 3348 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3349 } 3350 } 3351 3352 /* 3353 * Give control back to hardware management 3354 * controller if there is one. 3355 */ 3356 static void 3357 emx_rel_mgmt(struct emx_softc *sc) 3358 { 3359 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3360 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3361 3362 /* re-enable hardware interception of ARP */ 3363 manc |= E1000_MANC_ARP_EN; 3364 manc &= ~E1000_MANC_EN_MNG2HOST; 3365 3366 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3367 } 3368 } 3369 3370 /* 3371 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3372 * For ASF and Pass Through versions of f/w this means that 3373 * the driver is loaded. For AMT version (only with 82573) 3374 * of the f/w this means that the network i/f is open. 3375 */ 3376 static void 3377 emx_get_hw_control(struct emx_softc *sc) 3378 { 3379 /* Let firmware know the driver has taken over */ 3380 if (sc->hw.mac.type == e1000_82573) { 3381 uint32_t swsm; 3382 3383 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3384 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3385 swsm | E1000_SWSM_DRV_LOAD); 3386 } else { 3387 uint32_t ctrl_ext; 3388 3389 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3390 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3391 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3392 } 3393 sc->flags |= EMX_FLAG_HW_CTRL; 3394 } 3395 3396 /* 3397 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3398 * For ASF and Pass Through versions of f/w this means that the 3399 * driver is no longer loaded. For AMT version (only with 82573) 3400 * of the f/w this means that the network i/f is closed. 3401 */ 3402 static void 3403 emx_rel_hw_control(struct emx_softc *sc) 3404 { 3405 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0) 3406 return; 3407 sc->flags &= ~EMX_FLAG_HW_CTRL; 3408 3409 /* Let firmware taken over control of h/w */ 3410 if (sc->hw.mac.type == e1000_82573) { 3411 uint32_t swsm; 3412 3413 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3414 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3415 swsm & ~E1000_SWSM_DRV_LOAD); 3416 } else { 3417 uint32_t ctrl_ext; 3418 3419 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3420 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3421 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3422 } 3423 } 3424 3425 static int 3426 emx_is_valid_eaddr(const uint8_t *addr) 3427 { 3428 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3429 3430 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3431 return (FALSE); 3432 3433 return (TRUE); 3434 } 3435 3436 /* 3437 * Enable PCI Wake On Lan capability 3438 */ 3439 void 3440 emx_enable_wol(device_t dev) 3441 { 3442 uint16_t cap, status; 3443 uint8_t id; 3444 3445 /* First find the capabilities pointer*/ 3446 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3447 3448 /* Read the PM Capabilities */ 3449 id = pci_read_config(dev, cap, 1); 3450 if (id != PCIY_PMG) /* Something wrong */ 3451 return; 3452 3453 /* 3454 * OK, we have the power capabilities, 3455 * so now get the status register 3456 */ 3457 cap += PCIR_POWER_STATUS; 3458 status = pci_read_config(dev, cap, 2); 3459 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3460 pci_write_config(dev, cap, status, 2); 3461 } 3462 3463 static void 3464 emx_update_stats(struct emx_softc *sc) 3465 { 3466 struct ifnet *ifp = &sc->arpcom.ac_if; 3467 3468 if (sc->hw.phy.media_type == e1000_media_type_copper || 3469 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3470 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3471 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3472 } 3473 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3474 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3475 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3476 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3477 3478 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3479 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3480 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3481 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3482 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3483 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3484 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3485 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3486 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3487 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3488 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3489 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3490 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3491 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3492 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3493 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3494 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3495 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3496 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3497 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3498 3499 /* For the 64-bit byte counters the low dword must be read first. */ 3500 /* Both registers clear on the read of the high dword */ 3501 3502 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3503 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3504 3505 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3506 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3507 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3508 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3509 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3510 3511 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3512 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3513 3514 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3515 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3516 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3517 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3518 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3519 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3520 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3521 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3522 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3523 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3524 3525 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3526 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3527 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3528 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3529 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3530 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3531 3532 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3533 3534 /* Rx Errors */ 3535 IFNET_STAT_SET(ifp, ierrors, 3536 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3537 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3538 3539 /* Tx Errors */ 3540 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3541 } 3542 3543 static void 3544 emx_print_debug_info(struct emx_softc *sc) 3545 { 3546 device_t dev = sc->dev; 3547 uint8_t *hw_addr = sc->hw.hw_addr; 3548 int i; 3549 3550 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3551 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3552 E1000_READ_REG(&sc->hw, E1000_CTRL), 3553 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3554 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3555 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3556 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3557 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3558 sc->hw.fc.high_water, sc->hw.fc.low_water); 3559 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3560 E1000_READ_REG(&sc->hw, E1000_TIDV), 3561 E1000_READ_REG(&sc->hw, E1000_TADV)); 3562 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3563 E1000_READ_REG(&sc->hw, E1000_RDTR), 3564 E1000_READ_REG(&sc->hw, E1000_RADV)); 3565 3566 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3567 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3568 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3569 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3570 } 3571 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3572 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3573 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3574 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3575 } 3576 3577 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3578 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3579 sc->tx_data[i].num_tx_desc_avail); 3580 device_printf(dev, "TX %d TSO segments = %lu\n", i, 3581 sc->tx_data[i].tso_segments); 3582 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3583 sc->tx_data[i].tso_ctx_reused); 3584 } 3585 } 3586 3587 static void 3588 emx_print_hw_stats(struct emx_softc *sc) 3589 { 3590 device_t dev = sc->dev; 3591 3592 device_printf(dev, "Excessive collisions = %lld\n", 3593 (long long)sc->stats.ecol); 3594 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3595 device_printf(dev, "Symbol errors = %lld\n", 3596 (long long)sc->stats.symerrs); 3597 #endif 3598 device_printf(dev, "Sequence errors = %lld\n", 3599 (long long)sc->stats.sec); 3600 device_printf(dev, "Defer count = %lld\n", 3601 (long long)sc->stats.dc); 3602 device_printf(dev, "Missed Packets = %lld\n", 3603 (long long)sc->stats.mpc); 3604 device_printf(dev, "Receive No Buffers = %lld\n", 3605 (long long)sc->stats.rnbc); 3606 /* RLEC is inaccurate on some hardware, calculate our own. */ 3607 device_printf(dev, "Receive Length Errors = %lld\n", 3608 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3609 device_printf(dev, "Receive errors = %lld\n", 3610 (long long)sc->stats.rxerrc); 3611 device_printf(dev, "Crc errors = %lld\n", 3612 (long long)sc->stats.crcerrs); 3613 device_printf(dev, "Alignment errors = %lld\n", 3614 (long long)sc->stats.algnerrc); 3615 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3616 (long long)sc->stats.cexterr); 3617 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3618 device_printf(dev, "XON Rcvd = %lld\n", 3619 (long long)sc->stats.xonrxc); 3620 device_printf(dev, "XON Xmtd = %lld\n", 3621 (long long)sc->stats.xontxc); 3622 device_printf(dev, "XOFF Rcvd = %lld\n", 3623 (long long)sc->stats.xoffrxc); 3624 device_printf(dev, "XOFF Xmtd = %lld\n", 3625 (long long)sc->stats.xofftxc); 3626 device_printf(dev, "Good Packets Rcvd = %lld\n", 3627 (long long)sc->stats.gprc); 3628 device_printf(dev, "Good Packets Xmtd = %lld\n", 3629 (long long)sc->stats.gptc); 3630 } 3631 3632 static void 3633 emx_print_nvm_info(struct emx_softc *sc) 3634 { 3635 uint16_t eeprom_data; 3636 int i, j, row = 0; 3637 3638 /* Its a bit crude, but it gets the job done */ 3639 kprintf("\nInterface EEPROM Dump:\n"); 3640 kprintf("Offset\n0x0000 "); 3641 for (i = 0, j = 0; i < 32; i++, j++) { 3642 if (j == 8) { /* Make the offset block */ 3643 j = 0; ++row; 3644 kprintf("\n0x00%x0 ",row); 3645 } 3646 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3647 kprintf("%04x ", eeprom_data); 3648 } 3649 kprintf("\n"); 3650 } 3651 3652 static int 3653 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3654 { 3655 struct emx_softc *sc; 3656 struct ifnet *ifp; 3657 int error, result; 3658 3659 result = -1; 3660 error = sysctl_handle_int(oidp, &result, 0, req); 3661 if (error || !req->newptr) 3662 return (error); 3663 3664 sc = (struct emx_softc *)arg1; 3665 ifp = &sc->arpcom.ac_if; 3666 3667 ifnet_serialize_all(ifp); 3668 3669 if (result == 1) 3670 emx_print_debug_info(sc); 3671 3672 /* 3673 * This value will cause a hex dump of the 3674 * first 32 16-bit words of the EEPROM to 3675 * the screen. 3676 */ 3677 if (result == 2) 3678 emx_print_nvm_info(sc); 3679 3680 ifnet_deserialize_all(ifp); 3681 3682 return (error); 3683 } 3684 3685 static int 3686 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3687 { 3688 int error, result; 3689 3690 result = -1; 3691 error = sysctl_handle_int(oidp, &result, 0, req); 3692 if (error || !req->newptr) 3693 return (error); 3694 3695 if (result == 1) { 3696 struct emx_softc *sc = (struct emx_softc *)arg1; 3697 struct ifnet *ifp = &sc->arpcom.ac_if; 3698 3699 ifnet_serialize_all(ifp); 3700 emx_print_hw_stats(sc); 3701 ifnet_deserialize_all(ifp); 3702 } 3703 return (error); 3704 } 3705 3706 static void 3707 emx_add_sysctl(struct emx_softc *sc) 3708 { 3709 struct sysctl_ctx_list *ctx; 3710 struct sysctl_oid *tree; 3711 #if defined(EMX_RSS_DEBUG) || defined(EMX_TSS_DEBUG) 3712 char pkt_desc[32]; 3713 int i; 3714 #endif 3715 3716 ctx = device_get_sysctl_ctx(sc->dev); 3717 tree = device_get_sysctl_tree(sc->dev); 3718 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3719 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3720 emx_sysctl_debug_info, "I", "Debug Information"); 3721 3722 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3723 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3724 emx_sysctl_stats, "I", "Statistics"); 3725 3726 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3727 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0, 3728 "# of RX descs"); 3729 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3730 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0, 3731 "# of TX descs"); 3732 3733 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3734 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3735 emx_sysctl_int_throttle, "I", "interrupt throttling rate"); 3736 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3737 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3738 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt"); 3739 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3740 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3741 emx_sysctl_tx_wreg_nsegs, "I", 3742 "# segments sent before write to hardware register"); 3743 3744 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3745 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0, 3746 "# of RX rings"); 3747 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3748 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0, 3749 "# of TX rings"); 3750 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3751 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0, 3752 "# of TX rings used"); 3753 3754 #ifdef IFPOLL_ENABLE 3755 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3756 OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, 3757 sc, 0, emx_sysctl_npoll_rxoff, "I", 3758 "NPOLLING RX cpu offset"); 3759 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3760 OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, 3761 sc, 0, emx_sysctl_npoll_txoff, "I", 3762 "NPOLLING TX cpu offset"); 3763 #endif 3764 3765 #ifdef EMX_RSS_DEBUG 3766 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3767 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3768 0, "RSS debug level"); 3769 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3770 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i); 3771 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3772 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts, 3773 "RXed packets"); 3774 } 3775 #endif 3776 #ifdef EMX_TSS_DEBUG 3777 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3778 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i); 3779 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3780 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts, 3781 "TXed packets"); 3782 } 3783 #endif 3784 } 3785 3786 static int 3787 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3788 { 3789 struct emx_softc *sc = (void *)arg1; 3790 struct ifnet *ifp = &sc->arpcom.ac_if; 3791 int error, throttle; 3792 3793 throttle = sc->int_throttle_ceil; 3794 error = sysctl_handle_int(oidp, &throttle, 0, req); 3795 if (error || req->newptr == NULL) 3796 return error; 3797 if (throttle < 0 || throttle > 1000000000 / 256) 3798 return EINVAL; 3799 3800 if (throttle) { 3801 /* 3802 * Set the interrupt throttling rate in 256ns increments, 3803 * recalculate sysctl value assignment to get exact frequency. 3804 */ 3805 throttle = 1000000000 / 256 / throttle; 3806 3807 /* Upper 16bits of ITR is reserved and should be zero */ 3808 if (throttle & 0xffff0000) 3809 return EINVAL; 3810 } 3811 3812 ifnet_serialize_all(ifp); 3813 3814 if (throttle) 3815 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3816 else 3817 sc->int_throttle_ceil = 0; 3818 3819 if (ifp->if_flags & IFF_RUNNING) 3820 emx_set_itr(sc, throttle); 3821 3822 ifnet_deserialize_all(ifp); 3823 3824 if (bootverbose) { 3825 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3826 sc->int_throttle_ceil); 3827 } 3828 return 0; 3829 } 3830 3831 static int 3832 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3833 { 3834 struct emx_softc *sc = (void *)arg1; 3835 struct ifnet *ifp = &sc->arpcom.ac_if; 3836 struct emx_txdata *tdata = &sc->tx_data[0]; 3837 int error, segs; 3838 3839 segs = tdata->tx_intr_nsegs; 3840 error = sysctl_handle_int(oidp, &segs, 0, req); 3841 if (error || req->newptr == NULL) 3842 return error; 3843 if (segs <= 0) 3844 return EINVAL; 3845 3846 ifnet_serialize_all(ifp); 3847 3848 /* 3849 * Don't allow tx_intr_nsegs to become: 3850 * o Less the oact_tx_desc 3851 * o Too large that no TX desc will cause TX interrupt to 3852 * be generated (OACTIVE will never recover) 3853 * o Too small that will cause tx_dd[] overflow 3854 */ 3855 if (segs < tdata->oact_tx_desc || 3856 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 3857 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 3858 error = EINVAL; 3859 } else { 3860 int i; 3861 3862 error = 0; 3863 for (i = 0; i < sc->tx_ring_cnt; ++i) 3864 sc->tx_data[i].tx_intr_nsegs = segs; 3865 } 3866 3867 ifnet_deserialize_all(ifp); 3868 3869 return error; 3870 } 3871 3872 static int 3873 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3874 { 3875 struct emx_softc *sc = (void *)arg1; 3876 struct ifnet *ifp = &sc->arpcom.ac_if; 3877 int error, nsegs, i; 3878 3879 nsegs = sc->tx_data[0].tx_wreg_nsegs; 3880 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3881 if (error || req->newptr == NULL) 3882 return error; 3883 3884 ifnet_serialize_all(ifp); 3885 for (i = 0; i < sc->tx_ring_cnt; ++i) 3886 sc->tx_data[i].tx_wreg_nsegs =nsegs; 3887 ifnet_deserialize_all(ifp); 3888 3889 return 0; 3890 } 3891 3892 #ifdef IFPOLL_ENABLE 3893 3894 static int 3895 emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 3896 { 3897 struct emx_softc *sc = (void *)arg1; 3898 struct ifnet *ifp = &sc->arpcom.ac_if; 3899 int error, off; 3900 3901 off = sc->rx_npoll_off; 3902 error = sysctl_handle_int(oidp, &off, 0, req); 3903 if (error || req->newptr == NULL) 3904 return error; 3905 if (off < 0) 3906 return EINVAL; 3907 3908 ifnet_serialize_all(ifp); 3909 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) { 3910 error = EINVAL; 3911 } else { 3912 error = 0; 3913 sc->rx_npoll_off = off; 3914 } 3915 ifnet_deserialize_all(ifp); 3916 3917 return error; 3918 } 3919 3920 static int 3921 emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS) 3922 { 3923 struct emx_softc *sc = (void *)arg1; 3924 struct ifnet *ifp = &sc->arpcom.ac_if; 3925 int error, off; 3926 3927 off = sc->tx_npoll_off; 3928 error = sysctl_handle_int(oidp, &off, 0, req); 3929 if (error || req->newptr == NULL) 3930 return error; 3931 if (off < 0) 3932 return EINVAL; 3933 3934 ifnet_serialize_all(ifp); 3935 if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) { 3936 error = EINVAL; 3937 } else { 3938 error = 0; 3939 sc->tx_npoll_off = off; 3940 } 3941 ifnet_deserialize_all(ifp); 3942 3943 return error; 3944 } 3945 3946 #endif /* IFPOLL_ENABLE */ 3947 3948 static int 3949 emx_dma_alloc(struct emx_softc *sc) 3950 { 3951 int error, i; 3952 3953 /* 3954 * Create top level busdma tag 3955 */ 3956 error = bus_dma_tag_create(NULL, 1, 0, 3957 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3958 NULL, NULL, 3959 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 3960 0, &sc->parent_dtag); 3961 if (error) { 3962 device_printf(sc->dev, "could not create top level DMA tag\n"); 3963 return error; 3964 } 3965 3966 /* 3967 * Allocate transmit descriptors ring and buffers 3968 */ 3969 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3970 error = emx_create_tx_ring(&sc->tx_data[i]); 3971 if (error) { 3972 device_printf(sc->dev, 3973 "Could not setup transmit structures\n"); 3974 return error; 3975 } 3976 } 3977 3978 /* 3979 * Allocate receive descriptors ring and buffers 3980 */ 3981 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3982 error = emx_create_rx_ring(&sc->rx_data[i]); 3983 if (error) { 3984 device_printf(sc->dev, 3985 "Could not setup receive structures\n"); 3986 return error; 3987 } 3988 } 3989 return 0; 3990 } 3991 3992 static void 3993 emx_dma_free(struct emx_softc *sc) 3994 { 3995 int i; 3996 3997 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3998 emx_destroy_tx_ring(&sc->tx_data[i], 3999 sc->tx_data[i].num_tx_desc); 4000 } 4001 4002 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4003 emx_destroy_rx_ring(&sc->rx_data[i], 4004 sc->rx_data[i].num_rx_desc); 4005 } 4006 4007 /* Free top level busdma tag */ 4008 if (sc->parent_dtag != NULL) 4009 bus_dma_tag_destroy(sc->parent_dtag); 4010 } 4011 4012 static void 4013 emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 4014 { 4015 struct emx_softc *sc = ifp->if_softc; 4016 4017 ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz); 4018 } 4019 4020 static void 4021 emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4022 { 4023 struct emx_softc *sc = ifp->if_softc; 4024 4025 ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz); 4026 } 4027 4028 static int 4029 emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4030 { 4031 struct emx_softc *sc = ifp->if_softc; 4032 4033 return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz); 4034 } 4035 4036 static void 4037 emx_serialize_skipmain(struct emx_softc *sc) 4038 { 4039 lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1); 4040 } 4041 4042 static void 4043 emx_deserialize_skipmain(struct emx_softc *sc) 4044 { 4045 lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1); 4046 } 4047 4048 #ifdef INVARIANTS 4049 4050 static void 4051 emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 4052 boolean_t serialized) 4053 { 4054 struct emx_softc *sc = ifp->if_softc; 4055 4056 ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE, 4057 slz, serialized); 4058 } 4059 4060 #endif /* INVARIANTS */ 4061 4062 #ifdef IFPOLL_ENABLE 4063 4064 static void 4065 emx_npoll_status(struct ifnet *ifp) 4066 { 4067 struct emx_softc *sc = ifp->if_softc; 4068 uint32_t reg_icr; 4069 4070 ASSERT_SERIALIZED(&sc->main_serialize); 4071 4072 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 4073 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 4074 callout_stop(&sc->timer); 4075 sc->hw.mac.get_link_status = 1; 4076 emx_update_link_status(sc); 4077 callout_reset(&sc->timer, hz, emx_timer, sc); 4078 } 4079 } 4080 4081 static void 4082 emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 4083 { 4084 struct emx_txdata *tdata = arg; 4085 4086 ASSERT_SERIALIZED(&tdata->tx_serialize); 4087 4088 emx_txeof(tdata); 4089 if (!ifsq_is_empty(tdata->ifsq)) 4090 ifsq_devstart(tdata->ifsq); 4091 } 4092 4093 static void 4094 emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 4095 { 4096 struct emx_rxdata *rdata = arg; 4097 4098 ASSERT_SERIALIZED(&rdata->rx_serialize); 4099 4100 emx_rxeof(rdata, cycle); 4101 } 4102 4103 static void 4104 emx_npoll(struct ifnet *ifp, struct ifpoll_info *info) 4105 { 4106 struct emx_softc *sc = ifp->if_softc; 4107 int i, txr_cnt; 4108 4109 ASSERT_IFNET_SERIALIZED_ALL(ifp); 4110 4111 if (info) { 4112 int off; 4113 4114 info->ifpi_status.status_func = emx_npoll_status; 4115 info->ifpi_status.serializer = &sc->main_serialize; 4116 4117 txr_cnt = emx_get_txring_inuse(sc, TRUE); 4118 off = sc->tx_npoll_off; 4119 for (i = 0; i < txr_cnt; ++i) { 4120 struct emx_txdata *tdata = &sc->tx_data[i]; 4121 int idx = i + off; 4122 4123 KKASSERT(idx < ncpus2); 4124 info->ifpi_tx[idx].poll_func = emx_npoll_tx; 4125 info->ifpi_tx[idx].arg = tdata; 4126 info->ifpi_tx[idx].serializer = &tdata->tx_serialize; 4127 ifsq_set_cpuid(tdata->ifsq, idx); 4128 } 4129 4130 off = sc->rx_npoll_off; 4131 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4132 struct emx_rxdata *rdata = &sc->rx_data[i]; 4133 int idx = i + off; 4134 4135 KKASSERT(idx < ncpus2); 4136 info->ifpi_rx[idx].poll_func = emx_npoll_rx; 4137 info->ifpi_rx[idx].arg = rdata; 4138 info->ifpi_rx[idx].serializer = &rdata->rx_serialize; 4139 } 4140 4141 if (ifp->if_flags & IFF_RUNNING) { 4142 if (txr_cnt == sc->tx_ring_inuse) 4143 emx_disable_intr(sc); 4144 else 4145 emx_init(sc); 4146 } 4147 } else { 4148 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4149 struct emx_txdata *tdata = &sc->tx_data[i]; 4150 4151 ifsq_set_cpuid(tdata->ifsq, 4152 rman_get_cpuid(sc->intr_res)); 4153 } 4154 4155 if (ifp->if_flags & IFF_RUNNING) { 4156 txr_cnt = emx_get_txring_inuse(sc, FALSE); 4157 if (txr_cnt == sc->tx_ring_inuse) 4158 emx_enable_intr(sc); 4159 else 4160 emx_init(sc); 4161 } 4162 } 4163 } 4164 4165 #endif /* IFPOLL_ENABLE */ 4166 4167 static void 4168 emx_set_itr(struct emx_softc *sc, uint32_t itr) 4169 { 4170 E1000_WRITE_REG(&sc->hw, E1000_ITR, itr); 4171 if (sc->hw.mac.type == e1000_82574) { 4172 int i; 4173 4174 /* 4175 * When using MSIX interrupts we need to 4176 * throttle using the EITR register 4177 */ 4178 for (i = 0; i < 4; ++i) 4179 E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr); 4180 } 4181 } 4182 4183 /* 4184 * Disable the L0s, 82574L Errata #20 4185 */ 4186 static void 4187 emx_disable_aspm(struct emx_softc *sc) 4188 { 4189 uint16_t link_cap, link_ctrl, disable; 4190 uint8_t pcie_ptr, reg; 4191 device_t dev = sc->dev; 4192 4193 switch (sc->hw.mac.type) { 4194 case e1000_82571: 4195 case e1000_82572: 4196 case e1000_82573: 4197 /* 4198 * 82573 specification update 4199 * errata #8 disable L0s 4200 * errata #41 disable L1 4201 * 4202 * 82571/82572 specification update 4203 # errata #13 disable L1 4204 * errata #68 disable L0s 4205 */ 4206 disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1; 4207 break; 4208 4209 case e1000_82574: 4210 /* 4211 * 82574 specification update errata #20 4212 * 4213 * There is no need to disable L1 4214 */ 4215 disable = PCIEM_LNKCTL_ASPM_L0S; 4216 break; 4217 4218 default: 4219 return; 4220 } 4221 4222 pcie_ptr = pci_get_pciecap_ptr(dev); 4223 if (pcie_ptr == 0) 4224 return; 4225 4226 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2); 4227 if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0) 4228 return; 4229 4230 if (bootverbose) 4231 if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable); 4232 4233 reg = pcie_ptr + PCIER_LINKCTRL; 4234 link_ctrl = pci_read_config(dev, reg, 2); 4235 link_ctrl &= ~disable; 4236 pci_write_config(dev, reg, link_ctrl, 2); 4237 } 4238 4239 static int 4240 emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp) 4241 { 4242 int iphlen, hoff, thoff, ex = 0; 4243 struct mbuf *m; 4244 struct ip *ip; 4245 4246 m = *mp; 4247 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 4248 4249 iphlen = m->m_pkthdr.csum_iphlen; 4250 thoff = m->m_pkthdr.csum_thlen; 4251 hoff = m->m_pkthdr.csum_lhlen; 4252 4253 KASSERT(iphlen > 0, ("invalid ip hlen")); 4254 KASSERT(thoff > 0, ("invalid tcp hlen")); 4255 KASSERT(hoff > 0, ("invalid ether hlen")); 4256 4257 if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX) 4258 ex = 4; 4259 4260 if (m->m_len < hoff + iphlen + thoff + ex) { 4261 m = m_pullup(m, hoff + iphlen + thoff + ex); 4262 if (m == NULL) { 4263 *mp = NULL; 4264 return ENOBUFS; 4265 } 4266 *mp = m; 4267 } 4268 ip = mtodoff(m, struct ip *, hoff); 4269 ip->ip_len = 0; 4270 4271 return 0; 4272 } 4273 4274 static int 4275 emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp, 4276 uint32_t *txd_upper, uint32_t *txd_lower) 4277 { 4278 struct e1000_context_desc *TXD; 4279 int hoff, iphlen, thoff, hlen; 4280 int mss, pktlen, curr_txd; 4281 4282 #ifdef EMX_TSO_DEBUG 4283 tdata->tso_segments++; 4284 #endif 4285 4286 iphlen = mp->m_pkthdr.csum_iphlen; 4287 thoff = mp->m_pkthdr.csum_thlen; 4288 hoff = mp->m_pkthdr.csum_lhlen; 4289 mss = mp->m_pkthdr.tso_segsz; 4290 pktlen = mp->m_pkthdr.len; 4291 4292 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 4293 tdata->csum_flags == CSUM_TSO && 4294 tdata->csum_iphlen == iphlen && 4295 tdata->csum_lhlen == hoff && 4296 tdata->csum_thlen == thoff && 4297 tdata->csum_mss == mss && 4298 tdata->csum_pktlen == pktlen) { 4299 *txd_upper = tdata->csum_txd_upper; 4300 *txd_lower = tdata->csum_txd_lower; 4301 #ifdef EMX_TSO_DEBUG 4302 tdata->tso_ctx_reused++; 4303 #endif 4304 return 0; 4305 } 4306 hlen = hoff + iphlen + thoff; 4307 4308 /* 4309 * Setup a new TSO context. 4310 */ 4311 4312 curr_txd = tdata->next_avail_tx_desc; 4313 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 4314 4315 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 4316 E1000_TXD_DTYP_D | /* Data descr type */ 4317 E1000_TXD_CMD_TSE; /* Do TSE on this packet */ 4318 4319 /* IP and/or TCP header checksum calculation and insertion. */ 4320 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 4321 4322 /* 4323 * Start offset for header checksum calculation. 4324 * End offset for header checksum calculation. 4325 * Offset of place put the checksum. 4326 */ 4327 TXD->lower_setup.ip_fields.ipcss = hoff; 4328 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1); 4329 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum); 4330 4331 /* 4332 * Start offset for payload checksum calculation. 4333 * End offset for payload checksum calculation. 4334 * Offset of place to put the checksum. 4335 */ 4336 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen; 4337 TXD->upper_setup.tcp_fields.tucse = 0; 4338 TXD->upper_setup.tcp_fields.tucso = 4339 hoff + iphlen + offsetof(struct tcphdr, th_sum); 4340 4341 /* 4342 * Payload size per packet w/o any headers. 4343 * Length of all headers up to payload. 4344 */ 4345 TXD->tcp_seg_setup.fields.mss = htole16(mss); 4346 TXD->tcp_seg_setup.fields.hdr_len = hlen; 4347 TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS | 4348 E1000_TXD_CMD_DEXT | /* Extended descr */ 4349 E1000_TXD_CMD_TSE | /* TSE context */ 4350 E1000_TXD_CMD_IP | /* Do IP csum */ 4351 E1000_TXD_CMD_TCP | /* Do TCP checksum */ 4352 (pktlen - hlen)); /* Total len */ 4353 4354 /* Save the information for this TSO context */ 4355 tdata->csum_flags = CSUM_TSO; 4356 tdata->csum_lhlen = hoff; 4357 tdata->csum_iphlen = iphlen; 4358 tdata->csum_thlen = thoff; 4359 tdata->csum_mss = mss; 4360 tdata->csum_pktlen = pktlen; 4361 tdata->csum_txd_upper = *txd_upper; 4362 tdata->csum_txd_lower = *txd_lower; 4363 4364 if (++curr_txd == tdata->num_tx_desc) 4365 curr_txd = 0; 4366 4367 KKASSERT(tdata->num_tx_desc_avail > 0); 4368 tdata->num_tx_desc_avail--; 4369 4370 tdata->next_avail_tx_desc = curr_txd; 4371 return 1; 4372 } 4373 4374 static int 4375 emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling) 4376 { 4377 if (polling) 4378 return sc->tx_ring_cnt; 4379 else 4380 return 1; 4381 } 4382