/*
 * Copyright (c) 2001-2013, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_igb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/igb/if_igb.h>

#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */
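/*
 * Illustrative use of the debug macro above (hypothetical call site):
 * with IGB_RSS_DEBUG compiled in and the "rss_debug" sysctl set to >= 1,
 *
 *	IGB_RSS_DPRINTF(sc, 1, "ring %d, pkt %d\n", ring, count);
 *
 * prints through if_printf(); otherwise it compiles away to ((void)0).
 */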
#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id)	\
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }

static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),
	IGB_DEVICE(I350_VF),
	IGB_DEVICE(I210_COPPER),
	IGB_DEVICE(I210_COPPER_IT),
	IGB_DEVICE(I210_COPPER_OEM1),
	IGB_DEVICE(I210_COPPER_FLASHLESS),
	IGB_DEVICE(I210_SERDES_FLASHLESS),
	IGB_DEVICE(I210_FIBER),
	IGB_DEVICE(I210_SERDES),
	IGB_DEVICE(I210_SGMII),
	IGB_DEVICE(I211_COPPER),
	IGB_DEVICE(I354_BACKPLANE_1GBPS),
	IGB_DEVICE(I354_BACKPLANE_2_5GBPS),
	IGB_DEVICE(I354_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};
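/*
 * For reference, IGB_DEVICE(82576) in the table above expands to
 *
 *	{ IGB_VENDOR_ID, E1000_DEV_ID_82576, "Intel(R) PRO/1000 82576" }
 *
 * i.e. the device id token doubles as the human-readable description.
 */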
static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static void	igb_add_intr_rate_sysctl(struct igb_softc *, int,
		    const char *, const char *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
static int	igb_get_rxring_inuse(const struct igb_softc *, boolean_t);
static int	igb_get_txring_inuse(const struct igb_softc *, boolean_t);
static void	igb_set_timer_cpuid(struct igb_softc *, boolean_t);

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *, boolean_t);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *, boolean_t);
static void	igb_init_dmac(struct igb_softc *, uint32_t);
static void	igb_reg_dump(struct igb_softc *);
static int	igb_sysctl_reg_dump(SYSCTL_HANDLER_ARGS);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_get_ring_max(const struct igb_softc *);
static void	igb_get_rxring_cnt(const struct igb_softc *, int *, int *);
static void	igb_get_txring_cnt(const struct igb_softc *, int *, int *);
static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);
static void	igb_setup_serialize(struct igb_softc *);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifaltq_subque *);
static void	igb_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *, int);
static void	igb_txgc(struct igb_tx_ring *);
static void	igb_txgc_timer(void *);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *, int);
static void	igb_alloc_msix(struct igb_softc *);
static void	igb_free_msix(struct igb_softc *, boolean_t);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);
static void	igb_msix_rxtx(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(struct igb_softc *);
static int	igb_enable_phy_wol(struct igb_softc *);
static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	DEVMETHOD_END
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_txr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */

static char	igb_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;

/*
 * DMA Coalescing, only for i350 - default to off;
 * this feature is for power savings.
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.txr", &igb_txr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_STR("hw.igb.flow_ctrl", igb_flowctrl, sizeof(igb_flowctrl));

/* i350 specific */
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);

static __inline void
igb_tx_intr(struct igb_tx_ring *txr, int hdr)
{

	igb_txeof(txr, hdr);
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);
}

static __inline void
igb_try_txgc(struct igb_tx_ring *txr, int16_t dec)
{

	if (txr->tx_running > 0) {
		txr->tx_running -= dec;
		if (txr->tx_running <= 0 && txr->tx_nmbuf &&
		    txr->tx_avail < txr->num_tx_desc &&
		    txr->tx_avail + txr->intr_nsegs > txr->num_tx_desc)
			igb_txgc(txr);
	}
}

static void
igb_txgc_timer(void *xtxr)
{
	struct igb_tx_ring *txr = xtxr;
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP))
		return;

	if (!lwkt_serialize_try(&txr->tx_serialize))
		goto done;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP)) {
		lwkt_serialize_exit(&txr->tx_serialize);
		return;
	}
	igb_try_txgc(txr, IGB_TX_RUNNING_DEC);

	lwkt_serialize_exit(&txr->tx_serialize);
done:
	callout_reset(&txr->tx_gc_timer, 1, igb_txgc_timer, txr);
}

static __inline void
igb_free_txbuf(struct igb_tx_ring *txr, struct igb_tx_buf *txbuf)
{

	KKASSERT(txbuf->m_head != NULL);
	KKASSERT(txr->tx_nmbuf > 0);
	txr->tx_nmbuf--;

	bus_dmamap_unload(txr->tx_tag, txbuf->map);
	m_freem(txbuf->m_head);
	txbuf->m_head = NULL;
}
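/*
 * A summary of what igb_rxcsum() below does with the advanced RX
 * descriptor status word (staterr), not an exhaustive bit table:
 *
 *	IXSM set                    -> checksum status ignored entirely
 *	IPCS set and IPE clear      -> CSUM_IP_CHECKED | CSUM_IP_VALID
 *	TCPCS/UDPCS set, TCPE clear -> CSUM_DATA_VALID | CSUM_PSEUDO_HDR
 *	                               (csum_data forced to 0xffff)
 */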
static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}
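/*
 * Note on igb_rssinfo() above: only hash types the driver can trust are
 * dispatched with packet info.  A plain IPV4 hash type is accepted only
 * when the hardware also validated a TCP/UDP checksum (i.e. a
 * non-fragmented UDP datagram); everything else returns NULL and the
 * caller falls back to the non-RSS receive path.
 */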
static int
igb_get_ring_max(const struct igb_softc *sc)
{

	switch (sc->hw.mac.type) {
	case e1000_82575:
		return (IGB_MAX_RING_82575);

	case e1000_82576:
		return (IGB_MAX_RING_82576);

	case e1000_82580:
		return (IGB_MAX_RING_82580);

	case e1000_i350:
		return (IGB_MAX_RING_I350);

	case e1000_i354:
		return (IGB_MAX_RING_I354);

	case e1000_i210:
		return (IGB_MAX_RING_I210);

	case e1000_i211:
		return (IGB_MAX_RING_I211);

	default:
		return (IGB_MIN_RING);
	}
}

static void
igb_get_rxring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max)
{

	*ring_max = igb_get_ring_max(sc);
	*ring_cnt = device_getenv_int(sc->dev, "rxr", igb_rxr);
}

static void
igb_get_txring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max)
{

	*ring_max = igb_get_ring_max(sc);
	*ring_cnt = device_getenv_int(sc->dev, "txr", igb_txr);
}

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, ring_max, ring_cnt;
	char flowctrl[IFM_ETH_FC_STRLEN];

#ifdef notyet
	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
#endif

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    igb_media_change, igb_media_status);
	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	igb_get_rxring_cnt(sc, &ring_cnt, &ring_max);
	sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max);
	igb_get_txring_cnt(sc, &ring_cnt, &ring_max);
	sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max);
	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);

	sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
	sc->rx_ring_inuse = sc->rx_ring_cnt;
	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);
	sc->tx_ring_inuse = sc->tx_ring_cnt;

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    igb_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard Ethernet-sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
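	/*
	 * Worked example: with the standard 1500-byte MTU this is
	 * 1500 (ETHERMTU) + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1518,
	 * the classic maximum Ethernet frame size.
	 */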
	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/* Set up serializers */
	igb_setup_serialize(sc);

	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		if (sc->hw.phy.media_type == e1000_media_type_copper) {
			if (sc->hw.mac.type == e1000_i354)
				e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
			else
				e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
		}
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and the MAC address from it.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (sc->hw.mac.type != e1000_i210 && sc->hw.mac.type != e1000_i211 &&
	    e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree; must be done after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc, FALSE);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data) {
		/* XXX E1000_WUFC_MC is always cleared from E1000_WUC. */
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;
		device_printf(dev, "has WOL\n");
	}
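	/*
	 * A rough summary (not the full wake-up filter register):
	 * E1000_WUFC_MAG requests wake on magic packet, E1000_WUFC_MC
	 * wake on directed multicast; see the XXX note above on how
	 * the hardware treats the latter.
	 */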
#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif

	/*
	 * Disable interrupts to prevent spurious interrupts (line-based,
	 * MSI or even MSI-X), which have been observed on several types
	 * of LOMs, from being handled.
	 */
	igb_disable_intr(sc);

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);
		igb_enable_wol(sc);

		igb_teardown_intr(sc, sc->intr_cnt);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);
	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);
	if (sc->rx_rmap != NULL)
		if_ringmap_free(sc->rx_rmap);
	if (sc->rx_rmap_intr != NULL)
		if_ringmap_free(sc->rx_rmap_intr);
	if (sc->tx_rmap != NULL)
		if_ringmap_free(sc->tx_rmap);
	if (sc->tx_rmap_intr != NULL)
		if_ringmap_free(sc->tx_rmap_intr);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);
	igb_enable_wol(sc);

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
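		/*
		 * 9234 bytes is the frame-size cap used by this driver,
		 * so the largest accepted MTU works out to
		 * 9234 - 14 (ETHER_HDR_LEN) - 4 (ETHER_CRC_LEN) = 9216.
		 */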
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest MAC address; the user may have set a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc, FALSE);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Clear bad data from Rx FIFOs */
	e1000_rx_fifo_flush_82575(&sc->hw);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configure the RX/TX rings to use */
	igb_set_ring_inuse(sc, polling);
	ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc, polling);
	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	/* Clear counters */
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		if (sc->hw.mac.type == e1000_i354)
			e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
		else
			e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
	}

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_clr_oactive(txr->ifsq);
		ifsq_watchdog_start(&txr->tx_watchdog);

		if (!polling) {
			callout_reset_bycpu(&txr->tx_gc_timer, 1,
			    igb_txgc_timer, txr, txr->tx_intr_cpuid);
		}
	}

	igb_set_timer_cpuid(sc, polling);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
}

static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;

	case 100:
		/*
		 * Support for 100Mb SFP - these are fiber,
		 * but the media type appears as serdes.
		 */
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_100_FX;
		else
			ifmr->ifm_active |= IFM_100_TX;
		break;

	case 1000:
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_1000_SX;
		else
			ifmr->ifm_active |= IFM_1000_T;
		break;

	case 2500:
		ifmr->ifm_active |= IFM_2500_SX;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}
static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		igb_init(sc);

	return 0;
}

static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	int mcnt = 0;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	if (ifp->if_flags & IFF_ALLMULTI) {
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	} else {
		struct ifmultiaddr *ifma;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
	}
	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
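/*
 * Note for igb_disable_promisc() above and igb_set_multi() below: once the
 * number of multicast groups reaches MAX_NUM_MULTICAST_ADDRESSES, the
 * hardware multicast table is out of entries and the driver keeps (or sets)
 * multicast-promiscuous mode (E1000_RCTL_MPE) instead.
 */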
static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALL THROUGH */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			/* Get the flow control for display */
			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));

			if_printf(ifp, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex",
			    flowctrl);
		}
		if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
			e1000_force_flowctrl(hw, sc->ifm_flowctrl);
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* Delay Link Up for Phy update */
		if ((hw->mac.type == e1000_i210 ||
		     hw->mac.type == e1000_i211) &&
		    hw->phy.id == I210_I_PHY_ID)
			msec_delay(IGB_I210_LINK_DELAY);
		/*
		 * Reset if the media type changed.
		 * Support AutoMediaDetect for Marvell M88 PHY in i354.
		 */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = FALSE;
			igb_reset(sc, TRUE);
		}
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_clr_oactive(txr->ifsq);
		ifsq_watchdog_stop(&txr->tx_watchdog);
		txr->tx_flags &= ~IGB_TXFLAG_ENABLED;

		txr->tx_running = 0;
		callout_stop(&txr->tx_gc_timer);
	}

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}
static void
igb_reset(struct igb_softc *sc, boolean_t media_reset)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;

	case e1000_i210:
	case e1000_i211:
		pba = E1000_PBA_34K;
		break;

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * If short on RX space, RX wins
			 * and must trump the TX adjustment.
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water
	 *   mark.  This allows the receiver to restart by sending XON when
	 *   it has drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
	fc->requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);
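	/*
	 * Worked example (assuming E1000_PBA_34K == 34, i.e. 34 KB, and a
	 * 1518-byte max frame): pba << 10 = 34816 bytes, so
	 *	hwm = min(34816 * 9 / 10, 34816 - 2 * 1518)
	 *	    = min(31334, 31780) = 31334.
	 * On 82576 and later this is then rounded down to 16-byte
	 * granularity: high_water = 31334 & 0xFFF0 = 31328, and
	 * low_water = 31312.
	 */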
	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Reset for AutoMediaDetect */
	if (media_reset) {
		e1000_setup_init_funcs(hw, TRUE);
		e1000_get_bus_info(hw);
	}

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	igb_init_dmac(sc, pba);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif

	ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_rings[0].num_rx_desc;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_modulo;
	ifq_set_subq_divisor(&ifp->if_snd, 1);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog);
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
}

static void
igb_add_sysctl(struct igb_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char node[32];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

#define IGB_ADD_INTR_RATE_SYSCTL(sc, use, name) \
do { \
	igb_add_intr_rate_sysctl(sc, IGB_INTR_USE_##use, #name "_intr_rate", \
	    #use " interrupt rate"); \
} while (0)

	IGB_ADD_INTR_RATE_SYSCTL(sc, RXTX, rxtx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, RX, rx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, TX, tx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, STATUS, sts);

#undef IGB_ADD_INTR_RATE_SYSCTL
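	/*
	 * For reference, IGB_ADD_INTR_RATE_SYSCTL(sc, RXTX, rxtx) above
	 * expands to
	 *
	 *	igb_add_intr_rate_sysctl(sc, IGB_INTR_USE_RXTX,
	 *	    "rxtx_intr_rate", "RXTX interrupt rate");
	 *
	 * so each interrupt use type gets its own "<name>_intr_rate" node.
	 */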
"tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1701 sc, 0, igb_sysctl_tx_intr_nsegs, "I", 1702 "# of segments per TX interrupt"); 1703 1704 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1705 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1706 sc, 0, igb_sysctl_tx_wreg_nsegs, "I", 1707 "# of segments sent before write to hardware register"); 1708 1709 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1710 OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1711 sc, 0, igb_sysctl_rx_wreg_nsegs, "I", 1712 "# of segments received before write to hardware register"); 1713 1714 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 1715 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1716 OID_AUTO, "tx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1717 sc->tx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I", 1718 "TX MSI-X CPU map"); 1719 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1720 OID_AUTO, "rx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1721 sc->rx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I", 1722 "RX MSI-X CPU map"); 1723 } 1724 #ifdef IFPOLL_ENABLE 1725 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1726 OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1727 sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 1728 "TX polling CPU map"); 1729 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1730 OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1731 sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 1732 "RX polling CPU map"); 1733 #endif 1734 1735 #ifdef IGB_RSS_DEBUG 1736 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 1737 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0, 1738 "RSS debug level"); 1739 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1740 ksnprintf(node, sizeof(node), "rx%d_pkt", i); 1741 SYSCTL_ADD_ULONG(ctx, 1742 SYSCTL_CHILDREN(tree), OID_AUTO, node, 1743 CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets"); 1744 } 1745 #endif 1746 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1747 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1748 1749 #ifdef IGB_TSS_DEBUG 1750 ksnprintf(node, sizeof(node), "tx%d_pkt", i); 1751 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node, 1752 CTLFLAG_RW, &txr->tx_packets, "TXed packets"); 1753 #endif 1754 ksnprintf(node, sizeof(node), "tx%d_nmbuf", i); 1755 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node, 1756 CTLFLAG_RD, &txr->tx_nmbuf, 0, "# of pending TX mbufs"); 1757 1758 ksnprintf(node, sizeof(node), "tx%d_gc", i); 1759 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node, 1760 CTLFLAG_RW, &txr->tx_gc, "# of TX desc GC"); 1761 } 1762 1763 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1764 OID_AUTO, "dumpreg", CTLTYPE_INT | CTLFLAG_RW, 1765 sc, 0, igb_sysctl_reg_dump, "I", "dump registers"); 1766 } 1767 1768 static int 1769 igb_alloc_rings(struct igb_softc *sc) 1770 { 1771 int error, i; 1772 1773 /* 1774 * Create top level busdma tag 1775 */ 1776 error = bus_dma_tag_create(NULL, 1, 0, 1777 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1778 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 1779 &sc->parent_tag); 1780 if (error) { 1781 device_printf(sc->dev, "could not create top level DMA tag\n"); 1782 return error; 1783 } 1784 1785 /* 1786 * Allocate TX descriptor rings and buffers 1787 */ 1788 sc->tx_rings = kmalloc(sizeof(struct igb_tx_ring) * sc->tx_ring_cnt, 1789 M_DEVBUF, 1790 M_WAITOK | M_ZERO | M_CACHEALIGN); 1791 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1792 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1793 1794 /* Set up some basics */ 1795 txr->sc = sc; 1796 txr->me = i; 1797 txr->tx_intr_cpuid = -1; 1798 
	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc(tsize, M_DEVBUF,
	    M_WAITOK | M_ZERO | M_CACHEALIGN);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	if (txr->sc->hw.mac.type == e1000_82575)
		txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0;

	/*
	 * Initialize various watermarks
	 */
	if (txr->sc->hw.mac.type == e1000_82575) {
		/*
		 * There is no way to GC pending TX mbufs in 'header
		 * write back' mode with a reduced # of RS TX descs,
		 * since TDH does _not_ move on the 82575.
		 */
		txr->intr_nsegs = 1;
	} else {
		txr->intr_nsegs = txr->num_tx_desc / 16;
	}
	txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS;
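	/*
	 * Illustration of the watermarks just set: with 1024 descriptors
	 * on a non-82575 MAC, intr_nsegs = 1024 / 16 = 64, i.e. a TX
	 * completion interrupt is requested roughly every 64 descriptors,
	 * while wreg_nsegs batches TX doorbell register writes.
	 */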
"Unable to allocate TX DMA tag\n"); 1921 kfree(txr->tx_buf, M_DEVBUF); 1922 txr->tx_buf = NULL; 1923 return error; 1924 } 1925 1926 /* 1927 * Create DMA maps for TX buffers 1928 */ 1929 for (i = 0; i < txr->num_tx_desc; ++i) { 1930 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1931 1932 error = bus_dmamap_create(txr->tx_tag, 1933 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map); 1934 if (error) { 1935 device_printf(txr->sc->dev, 1936 "Unable to create TX DMA map\n"); 1937 igb_destroy_tx_ring(txr, i); 1938 return error; 1939 } 1940 } 1941 1942 if (txr->sc->hw.mac.type == e1000_82575) 1943 txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0; 1944 1945 /* 1946 * Initialize various watermark 1947 */ 1948 if (txr->sc->hw.mac.type == e1000_82575) { 1949 /* 1950 * There no ways to GC pending TX mbufs in 'header 1951 * write back' mode with reduced # of RS TX descs, 1952 * since TDH does _not_ move for 82575. 1953 */ 1954 txr->intr_nsegs = 1; 1955 } else { 1956 txr->intr_nsegs = txr->num_tx_desc / 16; 1957 } 1958 txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS; 1959 1960 return 0; 1961 } 1962 1963 static void 1964 igb_free_tx_ring(struct igb_tx_ring *txr) 1965 { 1966 int i; 1967 1968 for (i = 0; i < txr->num_tx_desc; ++i) { 1969 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1970 1971 if (txbuf->m_head != NULL) 1972 igb_free_txbuf(txr, txbuf); 1973 } 1974 } 1975 1976 static void 1977 igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc) 1978 { 1979 int i; 1980 1981 if (txr->txdma.dma_vaddr != NULL) { 1982 bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map); 1983 bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr, 1984 txr->txdma.dma_map); 1985 bus_dma_tag_destroy(txr->txdma.dma_tag); 1986 txr->txdma.dma_vaddr = NULL; 1987 } 1988 1989 if (txr->tx_hdr != NULL) { 1990 bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap); 1991 bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr, 1992 txr->tx_hdr_dmap); 1993 bus_dma_tag_destroy(txr->tx_hdr_dtag); 1994 txr->tx_hdr = NULL; 1995 } 1996 1997 if (txr->tx_buf == NULL) 1998 return; 1999 2000 for (i = 0; i < ndesc; ++i) { 2001 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 2002 2003 KKASSERT(txbuf->m_head == NULL); 2004 bus_dmamap_destroy(txr->tx_tag, txbuf->map); 2005 } 2006 bus_dma_tag_destroy(txr->tx_tag); 2007 2008 kfree(txr->tx_buf, M_DEVBUF); 2009 txr->tx_buf = NULL; 2010 } 2011 2012 static void 2013 igb_init_tx_ring(struct igb_tx_ring *txr) 2014 { 2015 /* Clear the old descriptor contents */ 2016 bzero(txr->tx_base, 2017 sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc); 2018 2019 /* Clear TX head write-back buffer */ 2020 *(txr->tx_hdr) = 0; 2021 2022 /* Reset indices */ 2023 txr->next_avail_desc = 0; 2024 txr->next_to_clean = 0; 2025 txr->tx_nsegs = 0; 2026 txr->tx_running = 0; 2027 txr->tx_nmbuf = 0; 2028 2029 /* Set number of descriptors available */ 2030 txr->tx_avail = txr->num_tx_desc; 2031 2032 /* Enable this TX ring */ 2033 txr->tx_flags |= IGB_TXFLAG_ENABLED; 2034 } 2035 2036 static void 2037 igb_init_tx_unit(struct igb_softc *sc) 2038 { 2039 struct e1000_hw *hw = &sc->hw; 2040 uint32_t tctl; 2041 int i; 2042 2043 /* Setup the Tx Descriptor Rings */ 2044 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2045 struct igb_tx_ring *txr = &sc->tx_rings[i]; 2046 uint64_t bus_addr = txr->txdma.dma_paddr; 2047 uint64_t hdr_paddr = txr->tx_hdr_paddr; 2048 uint32_t txdctl = 0; 2049 uint32_t dca_txctrl; 2050 2051 E1000_WRITE_REG(hw, E1000_TDLEN(i), 2052 txr->num_tx_desc * sizeof(struct e1000_tx_desc)); 2053 E1000_WRITE_REG(hw, E1000_TDBAH(i), 2054 (uint32_t)(bus_addr >> 32)); 2055 
static void
igb_init_tx_unit(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		uint64_t bus_addr = txr->txdma.dma_paddr;
		uint64_t hdr_paddr = txr->tx_hdr_paddr;
		uint32_t txdctl = 0;
		uint32_t dca_txctrl;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    txr->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
		dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);

		/*
		 * Don't set WB_on_EITR:
		 * - 82575 does not have it
		 * - It has almost no effect on 82576, see:
		 *   82576 specification update errata #26
		 * - It causes unnecessary bus traffic
		 */
		E1000_WRITE_REG(hw, E1000_TDWBAH(i),
		    (uint32_t)(hdr_paddr >> 32));
		E1000_WRITE_REG(hw, E1000_TDWBAL(i),
		    ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);

		/*
		 * WTHRESH is ignored by the hardware, since header
		 * write-back mode is used.
		 */
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}

	if (sc->vf_ifp)
		return;

	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}

static boolean_t
igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp)
{
	struct e1000_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	int ehdrlen, ctxd, ip_hlen = 0;
	boolean_t offload = TRUE;

	if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0)
		offload = FALSE;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;

	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor, thus
	 * we need to be here just for that setup.
	 */
	if (mp->m_flags & M_VLANTAG) {
		uint16_t vlantag;

		vlantag = htole16(mp->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (!offload) {
		return FALSE;
	}

	ehdrlen = mp->m_pkthdr.csum_lhlen;
	KASSERT(ehdrlen > 0, ("invalid ether hlen"));

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	if (mp->m_pkthdr.csum_flags & CSUM_IP) {
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		ip_hlen = mp->m_pkthdr.csum_iphlen;
		KASSERT(ip_hlen > 0, ("invalid ip hlen"));
	}
	vlan_macip_lens |= ip_hlen;

	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	if (mp->m_pkthdr.csum_flags & CSUM_TCP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as the TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return offload;
}
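/*
 * A worked example of the vlan_macip_lens packing above (a sketch,
 * assuming the usual shift values E1000_ADVTXD_VLAN_SHIFT = 16 and
 * E1000_ADVTXD_MACLEN_SHIFT = 9): an untagged TCP/IPv4 frame with a
 * 14-byte Ethernet header and a 20-byte IP header packs as
 *
 *	vlan_macip_lens = (14 << 9) | 20 = 0x1c14
 *
 * i.e. bits 31:16 VLAN tag, bits 15:9 MAC header length, bits 8:0 IP
 * header length, which is the layout the context descriptor expects.
 */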
static void
igb_txeof(struct igb_tx_ring *txr, int hdr)
{
	int first, avail;

	if (txr->tx_avail == txr->num_tx_desc)
		return;

	first = txr->next_to_clean;
	if (first == hdr)
		return;

	avail = txr->tx_avail;
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		KKASSERT(avail < txr->num_tx_desc);
		++avail;

		if (txbuf->m_head)
			igb_free_txbuf(txr, txbuf);

		if (++first == txr->num_tx_desc)
			first = 0;
	}
	txr->next_to_clean = first;
	txr->tx_avail = avail;

	/*
	 * If we have a minimum free, clear OACTIVE
	 * to tell the stack that it is OK to send packets.
	 */
	if (txr->tx_avail > IGB_MAX_SCATTER + IGB_TX_RESERVED) {
		ifsq_clr_oactive(txr->ifsq);

		/*
		 * We have enough TX descriptors, turn off
		 * the watchdog.  We allow a small amount of
		 * packets (roughly intr_nsegs) pending on
		 * the transmit ring.
		 */
		txr->tx_watchdog.wd_timer = 0;
	}
	txr->tx_running = IGB_TX_RUNNING;
}

static void
igb_txgc(struct igb_tx_ring *txr)
{
	int first, hdr;
#ifdef INVARIANTS
	int avail;
#endif

	if (txr->tx_avail == txr->num_tx_desc)
		return;

	hdr = E1000_READ_REG(&txr->sc->hw, E1000_TDH(txr->me));
	first = txr->next_to_clean;
	if (first == hdr)
		goto done;
	txr->tx_gc++;

#ifdef INVARIANTS
	avail = txr->tx_avail;
#endif
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

#ifdef INVARIANTS
		KKASSERT(avail < txr->num_tx_desc);
		++avail;
#endif
		if (txbuf->m_head)
			igb_free_txbuf(txr, txbuf);

		if (++first == txr->num_tx_desc)
			first = 0;
	}
done:
	if (txr->tx_nmbuf)
		txr->tx_running = IGB_TX_RUNNING;
}
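/*
 * A note on the two reclaim paths above (descriptive only): igb_txeof()
 * is the normal completion path and trusts the head index the hardware
 * wrote back into *(txr->tx_hdr), while igb_txgc() is a garbage-collection
 * fallback, apparently driven by the tx_gc_timer callout, which reads the
 * TDH register directly because pending mbufs may never trigger a head
 * write-back when too few descriptors carry the RS bit.
 */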
static int
igb_create_rx_ring(struct igb_rx_ring *rxr)
{
	int rsize, i, error, nrxd;

	/*
	 * Validate number of receive descriptors.  It must not exceed
	 * the hardware maximum, and must be a multiple of IGB_DBA_ALIGN.
	 */
	nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd);
	if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 ||
	    nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, nrxd);
		rxr->num_rx_desc = IGB_DEFAULT_RXD;
	} else {
		rxr->num_rx_desc = nrxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RX descriptor memory\n");
		return ENOMEM;
	}
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_rx_buf) * rxr->num_rx_desc);
	rxr->rx_buf = kmalloc(rsize, M_DEVBUF,
	    M_WAITOK | M_ZERO | M_CACHEALIGN);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES,		/* maxsize */
	    1,			/* nsegments */
	    MCLBYTES,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,	/* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA map\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA maps\n");
			igb_destroy_rx_ring(rxr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS;

	return 0;
}

static void
igb_free_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}

	if (rxr->fmp != NULL)
		m_freem(rxr->fmp);
	rxr->fmp = NULL;
	rxr->lmp = NULL;
}
static void
igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rxdma.dma_vaddr != NULL) {
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
		bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
		    rxr->rxdma.dma_map);
		bus_dma_tag_destroy(rxr->rxdma.dma_tag);
		rxr->rxdma.dma_vaddr = NULL;
	}

	if (rxr->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		KKASSERT(rxbuf->m_head == NULL);
		bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
	}
	bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
	bus_dma_tag_destroy(rxr->rx_tag);

	kfree(rxr->rx_buf, M_DEVBUF);
	rxr->rx_buf = NULL;
}

static void
igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf)
{
	rxd->read.pkt_addr = htole64(rxbuf->paddr);
	rxd->wb.upper.status_error = 0;
}

static int
igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct igb_rx_buf *rxbuf;
	int error, nseg;

	m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to allocate RX mbuf\n");
		}
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
	    rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to load RX mbuf\n");
		}
		return error;
	}

	rxbuf = &rxr->rx_buf[i];
	if (rxbuf->m_head != NULL)
		bus_dmamap_unload(rxr->rx_tag, rxbuf->map);

	map = rxbuf->map;
	rxbuf->map = rxr->rx_sparemap;
	rxr->rx_sparemap = map;

	rxbuf->m_head = m;
	rxbuf->paddr = seg.ds_addr;

	igb_setup_rxdesc(&rxr->rx_base[i], rxbuf);
	return 0;
}
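/*
 * A note on the spare-map dance in igb_newbuf() (descriptive only): the
 * fresh mbuf is always loaded into rx_sparemap first, so a DMA load
 * failure leaves the descriptor's current mbuf and mapping untouched and
 * the old packet buffer can simply be recycled.  Only once the load has
 * succeeded are the maps swapped, which also means no descriptor slot is
 * ever left without a backing DMA map.
 */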
static int
igb_init_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	/* Clear the ring contents */
	bzero(rxr->rx_base,
	    rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc));

	/* Now replenish the ring mbufs */
	for (i = 0; i < rxr->num_rx_desc; ++i) {
		int error;

		error = igb_newbuf(rxr, i, TRUE);
		if (error)
			return error;
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;

	rxr->fmp = NULL;
	rxr->lmp = NULL;
	rxr->discard = FALSE;

	return 0;
}

static void
igb_init_rx_unit(struct igb_softc *sc, boolean_t polling)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t rctl, rxcsum, srrctl = 0;
	int i;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

#if 0
	/*
	** Set up for header split
	*/
	if (igb_header_split) {
		/* Use a standard mbuf for the header */
		srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

	/*
	** Set up for jumbo frames
	*/
	if (ifp->if_mtu > ETHERMTU) {
		rctl |= E1000_RCTL_LPE;
#if 0
		if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
			srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
			srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		}
		/* Set maximum packet len */
		psize = adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
#else
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
#endif
	} else {
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
	}
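	/*
	 * A worked example of the SRRCTL sizing above (a sketch, assuming
	 * E1000_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. the field is in 1 KB
	 * units):
	 *
	 *	2048 >> 10 = 2	->  2 KB receive buffers (one mbuf cluster)
	 *	4096 >> 10 = 4	->  4 KB buffers in the disabled jumbo path
	 *
	 * The RCTL_SZ_* bits must agree with the SRRCTL packet buffer
	 * size, which is why the two are always set together here.
	 */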
	/*
	 * If TX flow control is disabled and more than 1 RX ring
	 * is enabled, enable DROP.
	 *
	 * This drops frames rather than hanging the RX MAC for all
	 * RX rings.
	 */
	if (sc->rx_ring_inuse > 1 &&
	    (sc->ifm_flowctrl & IFM_ETH_TXPAUSE) == 0) {
		srrctl |= E1000_SRRCTL_DROP_EN;
		if (bootverbose)
			if_printf(ifp, "enable RX drop\n");
	}

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		uint64_t bus_addr = rxr->rxdma.dma_paddr;
		uint32_t rxdctl;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		/*
		 * Don't set WTHRESH to a value above 1 on 82576, see:
		 * 82576 specification update errata #26
		 */
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
	rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);

	/*
	 * Receive Checksum Offload for TCP and UDP
	 *
	 * Checksum offloading is also enabled if multiple receive
	 * queues are to be supported, since we need it to figure out
	 * fragments.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD;
	} else {
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD);
	}
	E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);

	if (sc->rx_ring_inuse > 1) {
		uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
		const struct if_ringmap *rm;
		uint32_t reta_shift;
		int j, r;

		/*
		 * NOTE:
		 * When we reach here, RSS has already been disabled
		 * in igb_stop(), so we can safely configure the RSS
		 * key and redirect table.
		 */

		/*
		 * Configure RSS key
		 */
		toeplitz_get_key(key, sizeof(key));
		for (i = 0; i < IGB_NRSSRK; ++i) {
			uint32_t rssrk;

			rssrk = IGB_RSSRK_VAL(key, i);
			IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);

			E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
		}

		/*
		 * Configure RSS redirect table
		 */
		if (polling)
			rm = sc->rx_rmap;
		else
			rm = sc->rx_rmap_intr;
		if_ringmap_rdrtable(rm, sc->rdr_table, IGB_RDRTABLE_SIZE);

		reta_shift = IGB_RETA_SHIFT;
		if (hw->mac.type == e1000_82575)
			reta_shift = IGB_RETA_SHIFT_82575;

		r = 0;
		for (j = 0; j < IGB_NRETA; ++j) {
			uint32_t reta = 0;

			for (i = 0; i < IGB_RETA_SIZE; ++i) {
				uint32_t q;

				q = sc->rdr_table[r] << reta_shift;
				reta |= q << (8 * i);
				++r;
			}
			IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
			E1000_WRITE_REG(hw, E1000_RETA(j), reta);
		}

		/*
		 * Enable multiple receive queues.
		 * Enable IPv4 RSS standard hash functions.
		 * Disable RSS interrupt on 82575.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_MRQC,
		    E1000_MRQC_ENABLE_RSS_4Q |
		    E1000_MRQC_RSS_FIELD_IPV4_TCP |
		    E1000_MRQC_RSS_FIELD_IPV4);
	}
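	/*
	 * A worked example of the RETA packing above (a sketch, assuming
	 * IGB_RETA_SIZE is 4, i.e. four one-byte entries per 32-bit RETA
	 * register): with two RX rings and a redirect table alternating
	 * 0,1,0,1,... each byte becomes (queue << reta_shift), so with a
	 * reta_shift of 6 (as on the 82575, for illustration) the register
	 * value is
	 *
	 *	(0 << 6) | ((1 << 6) << 8) | ((0 << 6) << 16) |
	 *	((1 << 6) << 24) = 0x40004000
	 *
	 * matching the "reta 0x%08x" debug output one would expect.
	 */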
	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF |
	    (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	/* Strip CRC bytes. */
	rctl |= E1000_RCTL_SECRC;
	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers
	 * - needs to be after enable
	 */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1);
	}
}

static void
igb_rx_refresh(struct igb_rx_ring *rxr, int i)
{
	if (--i < 0)
		i = rxr->num_rx_desc - 1;
	E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i);
}

static void
igb_rxeof(struct igb_rx_ring *rxr, int count)
{
	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
	union e1000_adv_rx_desc *cur;
	uint32_t staterr;
	int i, ncoll = 0, cpuid = mycpuid;

	i = rxr->next_to_check;
	cur = &rxr->rx_base[i];
	staterr = le32toh(cur->wb.upper.status_error);

	if ((staterr & E1000_RXD_STAT_DD) == 0)
		return;

	while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
		struct pktinfo *pi = NULL, pi0;
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
		struct mbuf *m = NULL;
		boolean_t eop;

		eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE;
		if (eop)
			--count;

		++ncoll;
		if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 &&
		    !rxr->discard) {
			struct mbuf *mp = rxbuf->m_head;
			uint32_t hash, hashtype;
			uint16_t vlan;
			int len;

			len = le16toh(cur->wb.upper.length);
			if ((rxr->sc->hw.mac.type == e1000_i350 ||
			     rxr->sc->hw.mac.type == e1000_i354) &&
			    (staterr & E1000_RXDEXT_STATERR_LB))
				vlan = be16toh(cur->wb.upper.vlan);
			else
				vlan = le16toh(cur->wb.upper.vlan);

			hash = le32toh(cur->wb.lower.hi_dword.rss);
			hashtype = le32toh(cur->wb.lower.lo_dword.data) &
			    E1000_RXDADV_RSSTYPE_MASK;

			IGB_RSS_DPRINTF(rxr->sc, 10,
			    "ring%d, hash 0x%08x, hashtype %u\n",
			    rxr->me, hash, hashtype);

			bus_dmamap_sync(rxr->rx_tag, rxbuf->map,
			    BUS_DMASYNC_POSTREAD);

			if (igb_newbuf(rxr, i, FALSE) != 0) {
				IFNET_STAT_INC(ifp, iqdrops, 1);
				goto discard;
			}

			mp->m_len = len;
			if (rxr->fmp == NULL) {
				mp->m_pkthdr.len = len;
				rxr->fmp = mp;
				rxr->lmp = mp;
			} else {
				rxr->lmp->m_next = mp;
				rxr->lmp = rxr->lmp->m_next;
				rxr->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				m = rxr->fmp;
				rxr->fmp = NULL;
				rxr->lmp = NULL;

				m->m_pkthdr.rcvif = ifp;
				IFNET_STAT_INC(ifp, ipackets, 1);

				if (ifp->if_capenable & IFCAP_RXCSUM)
					igb_rxcsum(staterr, m);

				if (staterr & E1000_RXD_STAT_VP) {
					m->m_pkthdr.ether_vlantag = vlan;
					m->m_flags |= M_VLANTAG;
				}

				if (ifp->if_capenable & IFCAP_RSS) {
					pi = igb_rssinfo(m, &pi0,
					    hash, hashtype, staterr);
				}
#ifdef IGB_RSS_DEBUG
				rxr->rx_packets++;
#endif
			}
		} else {
			IFNET_STAT_INC(ifp, ierrors, 1);
discard:
			igb_setup_rxdesc(cur, rxbuf);
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			if (rxr->fmp != NULL) {
				m_freem(rxr->fmp);
				rxr->fmp = NULL;
				rxr->lmp = NULL;
			}
			m = NULL;
		}

		if (m != NULL)
			ifp->if_input(ifp, m, pi, cpuid);

		/* Advance our pointers to the next descriptor. */
		if (++i == rxr->num_rx_desc)
			i = 0;

		if (ncoll >= rxr->wreg_nsegs) {
			igb_rx_refresh(rxr, i);
			ncoll = 0;
		}

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);
	}
	rxr->next_to_check = i;

	if (ncoll > 0)
		igb_rx_refresh(rxr, i);
}
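/*
 * A note on the ncoll/wreg_nsegs logic above (descriptive only): instead
 * of handing every replenished descriptor back to the hardware one RDT
 * write at a time, igb_rxeof() batches wreg_nsegs descriptors per
 * igb_rx_refresh() call and flushes the remainder once at the end of the
 * loop, trading a little replenish latency for fewer MMIO writes.  The
 * rx_wreg_nsegs sysctl below tunes this batch size.
 */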
static void
igb_set_vlan(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;
#if 0
	struct ifnet *ifp = &sc->arpcom.ac_if;
#endif

	if (sc->vf_ifp) {
		e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

#if 0
	/* Enable the Filter Table */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		reg = E1000_READ_REG(hw, E1000_RCTL);
		reg &= ~E1000_RCTL_CFIEN;
		reg |= E1000_RCTL_VFE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
#endif

	/* Update the frame size */
	E1000_WRITE_REG(&sc->hw, E1000_RLPML,
	    sc->max_frame_size + VLAN_TAG_SIZE);

#if 0
	/* Don't bother with the table if no vlans */
	if ((adapter->num_vlans == 0) ||
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
		return;
	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IGB_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0) {
			if (adapter->vf_ifp)
				e1000_vfta_set_vf(hw,
				    adapter->shadow_vfta[i], TRUE);
			else
				E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
				    i, adapter->shadow_vfta[i]);
		}
#endif
}

static void
igb_enable_intr(struct igb_softc *sc)
{
	int i;

	for (i = 0; i < sc->intr_cnt; ++i)
		lwkt_serialize_handler_enable(sc->intr_data[i].intr_serialize);

	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
		if (sc->intr_type == PCI_INTR_TYPE_MSIX)
			E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask);
		else
			E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
		E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask);
		E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
		E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
	} else {
		E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
	}
	E1000_WRITE_FLUSH(&sc->hw);
}

static void
igb_disable_intr(struct igb_softc *sc)
{
	int i;

	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
		E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff);
		E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
	}
	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	E1000_WRITE_FLUSH(&sc->hw);

	for (i = 0; i < sc->intr_cnt; ++i)
		lwkt_serialize_handler_disable(sc->intr_data[i].intr_serialize);
}
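/*
 * A note on ordering in the two functions above (descriptive only):
 * igb_enable_intr() re-enables the serialized handlers before unmasking
 * the hardware (EIMS/IMS), while igb_disable_intr() masks the hardware
 * first and detaches the handlers last, so no interrupt can fire while
 * its handler is administratively disabled in either direction.
 */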
/*
 * Bit of a misnomer: what this really means is to enable OS management
 * of the system, i.e. to disable special hardware management features.
 */
static void
igb_get_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~E1000_MANC_ARP_EN;

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5; /* Mng Port 623 */
		manc2h |= 1 << 6; /* Mng Port 664 */
		E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to the hardware management controller
 * if there is one.
 */
static void
igb_rel_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* Re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * Sets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void
igb_get_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * Resets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_rel_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

static boolean_t
igb_is_valid_ether_addr(const uint8_t *addr)
{
	uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return FALSE;
	return TRUE;
}
/*
 * Enable PCI Wake On Lan capability
 */
static void
igb_enable_wol(struct igb_softc *sc)
{
	device_t dev = sc->dev;
	int error = 0;
	uint32_t pmc, ctrl;
	uint16_t status;

	if (pci_find_extcap(dev, PCIY_PMG, &pmc) != 0) {
		device_printf(dev, "no PMG\n");
		return;
	}

	/*
	 * Set the type of wakeup.
	 */
	sc->wol &= ~(E1000_WUFC_EX | E1000_WUFC_MC);
	if ((sc->wol & (E1000_WUFC_EX | E1000_WUFC_MAG | E1000_WUFC_MC)) == 0)
		goto pme;

	/*
	 * Advertise the wakeup capabilities.
	 */
	ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);

	/*
	 * Keep the laser running on Fiber adapters.
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		uint32_t ctrl_ext;

		ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, ctrl_ext);
	}

	error = igb_enable_phy_wol(sc);
	if (error)
		goto pme;

	/* XXX will this happen? ich/pch specific. */
	if (sc->hw.phy.type == e1000_phy_igp_3)
		e1000_igp3_phy_powerdown_workaround_ich8lan(&sc->hw);

pme:
	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (!error)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
}
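/*
 * A note on the pme: epilogue above (a sketch of the intent): in the PCI
 * power-management status register, PME_Status is write-1-to-clear while
 * PME_En is an ordinary control bit.  On success the write both clears
 * any stale PME event and arms PME generation; on any earlier failure the
 * same write simply leaves PME disabled.
 */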
/*
 * WOL in the newer chipset interfaces (pchlan)
 * requires things to be copied into the PHY.
 */
static int
igb_enable_phy_wol(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t mreg;
	uint16_t preg;
	int ret = 0, i;

	/* Copy MAC RARs to PHY RARs */
	e1000_copy_rx_addrs_to_phy_ich8lan(hw);

	/* Copy MAC MTA to PHY MTA */
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (uint16_t)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (uint16_t)((mreg >> 16) & 0xFFFF));
	}

	/* Configure PHY Rx Control register */
	e1000_read_phy_reg(hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3) {
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
		    << BM_RCTL_MO_SHIFT);
	}
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&sc->hw, BM_RCTL, preg);

	/* Enable PHY wakeup in MAC register. */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN | E1000_WUC_APME);
	E1000_WRITE_REG(hw, E1000_WUFC, sc->wol);

	/* Configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(hw, BM_WUFC, sc->wol);
	e1000_write_phy_reg(hw, BM_WUC, E1000_WUC_PME_EN);
	/* Activate PHY wakeup */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		if_printf(&sc->arpcom.ac_if, "Could not acquire PHY\n");
		return ret;
	}
	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
	    (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		if_printf(&sc->arpcom.ac_if, "Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret) {
		if_printf(&sc->arpcom.ac_if,
		    "Could not set PHY Host Wakeup bit\n");
	}
out:
	hw->phy.ops.release(hw);
	return ret;
}
static void
igb_update_stats_counters(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_hw_stats *stats;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * The virtual function adapter has only a
	 * small controlled set of stats, so do only
	 * those and return.
	 */
	if (sc->vf_ifp) {
		igb_update_vf_stats_counters(sc);
		return;
	}
	stats = sc->stats;

	if (sc->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += sc->pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/*
	 * For the 64-bit byte counters the low dword must be read first;
	 * both registers clear on the read of the high dword.
	 */
	stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
	stats->mgpdc += E1000_READ_REG(hw, E1000_MGTPDC);
	stats->mgptc += E1000_READ_REG(hw, E1000_MGTPTC);

	stats->tor += E1000_READ_REG(hw, E1000_TORL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
	stats->tot += E1000_READ_REG(hw, E1000_TOTL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */
	stats->iac += E1000_READ_REG(hw, E1000_IAC);
	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

	/* Host to Card Statistics */
	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
	stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32));
	stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
	IFNET_STAT_SET(ifp, collisions, stats->colc);

	/* Rx Errors */
	IFNET_STAT_SET(ifp, ierrors,
	    stats->rxerrc + stats->crcerrs + stats->algnerrc +
	    stats->ruc + stats->roc + stats->mpc + stats->cexterr);

	/* Tx Errors */
	IFNET_STAT_SET(ifp, oerrors,
	    stats->ecol + stats->latecol + sc->watchdog_events);

	/* Driver specific counters */
	sc->device_control = E1000_READ_REG(hw, E1000_CTRL);
	sc->rx_control = E1000_READ_REG(hw, E1000_RCTL);
	sc->int_mask = E1000_READ_REG(hw, E1000_IMS);
	sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
	sc->packet_buf_alloc_tx =
	    ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
	sc->packet_buf_alloc_rx =
	    (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
}

static void
igb_vf_init_stats(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_vf_stats *stats;

	stats = sc->stats;
	stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
	stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
	stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
	stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
	stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
}

static void
igb_update_vf_stats_counters(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_vf_stats *stats;

	if (sc->link_speed == 0)
		return;

	stats = sc->stats;
	UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
	UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc);
	UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc);
	UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc);
	UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc);
}

#ifdef IFPOLL_ENABLE

static void
igb_npoll_status(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}
}

static void
igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct igb_tx_ring *txr = arg;

	ASSERT_SERIALIZED(&txr->tx_serialize);
	igb_tx_intr(txr, *(txr->tx_hdr));
	igb_try_txgc(txr, 1);
}

static void
igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct igb_rx_ring *rxr = arg;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	igb_rxeof(rxr, cycle);
}
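/*
 * A note on igb_npoll() below (descriptive only): when polling is being
 * enabled (info != NULL), each TX and RX ring is handed to the polling
 * framework on the CPU chosen by its ring map, and the TX subqueue is
 * rebound to that CPU; the RX handler's "cycle" argument caps how many
 * packets igb_rxeof() may consume per poll.  When polling is being
 * disabled, the subqueues fall back to the interrupt CPUs.
 */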
static void
igb_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct igb_softc *sc = ifp->if_softc;
	int i, txr_cnt, rxr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int cpu;

		info->ifpi_status.status_func = igb_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = igb_get_txring_inuse(sc, TRUE);
		for (i = 0; i < txr_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_tx[cpu].poll_func = igb_npoll_tx;
			info->ifpi_tx[cpu].arg = txr;
			info->ifpi_tx[cpu].serializer = &txr->tx_serialize;
			ifsq_set_cpuid(txr->ifsq, cpu);
		}

		rxr_cnt = igb_get_rxring_inuse(sc, TRUE);
		for (i = 0; i < rxr_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_rx[cpu].poll_func = igb_npoll_rx;
			info->ifpi_rx[cpu].arg = rxr;
			info->ifpi_rx[cpu].serializer = &rxr->rx_serialize;
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid);
		}
	}
	if (ifp->if_flags & IFF_RUNNING)
		igb_init(sc);
}

#endif /* IFPOLL_ENABLE */

static void
igb_intr(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t eicr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	eicr = E1000_READ_REG(&sc->hw, E1000_EICR);

	if (eicr == 0)
		return;

	if (ifp->if_flags & IFF_RUNNING) {
		struct igb_tx_ring *txr = &sc->tx_rings[0];
		int i;

		for (i = 0; i < sc->rx_ring_inuse; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			if (eicr & rxr->rx_intr_mask) {
				lwkt_serialize_enter(&rxr->rx_serialize);
				igb_rxeof(rxr, -1);
				lwkt_serialize_exit(&rxr->rx_serialize);
			}
		}

		if (eicr & txr->tx_intr_mask) {
			lwkt_serialize_enter(&txr->tx_serialize);
			igb_tx_intr(txr, *(txr->tx_hdr));
			lwkt_serialize_exit(&txr->tx_serialize);
		}
	}

	if (eicr & E1000_EICR_OTHER) {
		uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);

		/* Link status change */
		if (icr & E1000_ICR_LSC) {
			sc->hw.mac.get_link_status = 1;
			igb_update_link_status(sc);
		}
	}

	/*
	 * Reading EICR has the side effect of clearing the interrupt
	 * mask, so all interrupts need to be enabled here.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
}
static void
igb_intr_shared(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	/* Hot eject? */
	if (reg_icr == 0xffffffff)
		return;

	/* Definitely not our interrupt. */
	if (reg_icr == 0x0)
		return;

	if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return;

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_inuse; ++i) {
				struct igb_rx_ring *rxr = &sc->rx_rings[i];

				lwkt_serialize_enter(&rxr->rx_serialize);
				igb_rxeof(rxr, -1);
				lwkt_serialize_exit(&rxr->rx_serialize);
			}
		}

		if (reg_icr & E1000_ICR_TXDW) {
			struct igb_tx_ring *txr = &sc->tx_rings[0];

			lwkt_serialize_enter(&txr->tx_serialize);
			igb_tx_intr(txr, *(txr->tx_hdr));
			lwkt_serialize_exit(&txr->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;
}
static int
igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[IGB_MAX_SCATTER];
	bus_dmamap_t map;
	struct igb_tx_buf *tx_buf, *tx_buf_mapped;
	union e1000_adv_tx_desc *txd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
	int maxsegs, nsegs, i, j, error;
	uint32_t hdrlen = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = igb_tso_pullup(txr, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	/* Set basic descriptor constants */
	cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
	cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	/*
	 * Map the packet for DMA.
	 */
	tx_buf = &txr->tx_buf[txr->next_avail_desc];
	tx_buf_mapped = tx_buf;
	map = tx_buf->map;

	maxsegs = txr->tx_avail - IGB_TX_RESERVED;
	if (maxsegs > IGB_MAX_SCATTER)
		maxsegs = IGB_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			txr->sc->mbuf_defrag_failed++;
		else
			txr->sc->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;

	/*
	 * Set up the TX context descriptor, if any hardware offloading is
	 * needed.  This includes CSUM, VLAN, and TSO.  It will consume one
	 * TX descriptor.
	 *
	 * Unlike these chips' predecessors (em/emx), the TX context
	 * descriptor will _not_ interfere with TX data fetch pipelining.
	 */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		igb_tso_ctx(txr, m_head, &hdrlen);
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
		olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		txr->tx_nsegs++;
		(*segs_used)++;
	} else if (igb_txcsum_ctx(txr, m_head)) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
		if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
			olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
		txr->tx_nsegs++;
		(*segs_used)++;
	}

	*segs_used += nsegs;
	txr->tx_nsegs += nsegs;
	if (txr->tx_nsegs >= txr->intr_nsegs) {
		/*
		 * Report Status (RS) is turned on every intr_nsegs
		 * descriptors (roughly).
		 */
		txr->tx_nsegs = 0;
		cmd_rs = E1000_ADVTXD_DCMD_RS;
	}

	/* Calculate payload length */
	olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
	    << E1000_ADVTXD_PAYLEN_SHIFT);

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as the TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	/* Set up our transmit descriptors */
	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;

		tx_buf = &txr->tx_buf[i];
		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seg_addr = segs[j].ds_addr;
		seg_len = segs[j].ds_len;

		txd->read.buffer_addr = htole64(seg_addr);
		txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
		txd->read.olinfo_status = htole32(olinfo_status);
		if (++i == txr->num_tx_desc)
			i = 0;
		tx_buf->m_head = NULL;
	}

	KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
	txr->next_avail_desc = i;
	txr->tx_avail -= nsegs;
	txr->tx_nmbuf++;

	tx_buf->m_head = m_head;
	tx_buf_mapped->map = tx_buf->map;
	tx_buf->map = map;

	/*
	 * The last descriptor of the packet needs End Of Packet (EOP)
	 */
	txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);

	/*
	 * Defer TDT updating until enough descriptors are set up
	 */
	*idx = i;
#ifdef IGB_TSS_DEBUG
	++txr->tx_packets;
#endif

	return 0;
}
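/*
 * A note on the write-back strategy above (descriptive only): the RS bit
 * is requested only about every intr_nsegs descriptors, so the hardware
 * reports completion, and hence raises a TX interrupt, for only a small
 * fraction of packets; similarly, the caller defers the TDT doorbell
 * write (see igb_start() below) until wreg_nsegs segments have been
 * queued.  Both knobs trade completion latency for fewer MMIO writes and
 * interrupts.
 */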
static void
igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct igb_softc *sc = ifp->if_softc;
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		if (txr->tx_avail <= IGB_MAX_SCATTER + IGB_TX_RESERVED) {
			ifsq_set_oactive(ifsq);
			/* Set watchdog on */
			txr->tx_watchdog.wd_timer = 5;
			break;
		}

		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (igb_encap(txr, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time would make the opackets
		 * statistics vastly inaccurate; we do the opackets
		 * increment now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= txr->wreg_nsegs) {
			E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
			idx = -1;
			nsegs = 0;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	if (idx >= 0)
		E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
	txr->tx_running = IGB_TX_RUNNING;
}

static void
igb_watchdog(struct ifaltq_subque *ifsq)
{
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct igb_softc *sc = ifp->if_softc;
	int i;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If flow control has paused us since we last checked,
	 * it invalidates the watchdog timing, so don't run it.
	 */
	if (sc->pause_frames) {
		sc->pause_frames = 0;
		txr->tx_watchdog.wd_timer = 5;
		return;
	}

	if_printf(ifp, "Watchdog timeout -- resetting\n");
	if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
	    E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
	if_printf(ifp, "TX(%d) desc avail = %d, Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);

	IFNET_STAT_INC(ifp, oerrors, 1);
	sc->watchdog_events++;

	igb_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);
}

static void
igb_set_eitr(struct igb_softc *sc, int idx, int rate)
{
	uint32_t eitr = 0;

	if (rate > 0) {
		if (sc->hw.mac.type == e1000_82575) {
			eitr = 1000000000 / 256 / rate;
			/*
			 * NOTE:
			 * The document is wrong about the 2-bit left shift
			 */
		} else {
			eitr = 1000000 / rate;
			eitr <<= IGB_EITR_INTVL_SHIFT;
		}

		if (eitr == 0) {
			/* Don't disable it */
			eitr = 1 << IGB_EITR_INTVL_SHIFT;
		} else if (eitr > IGB_EITR_INTVL_MASK) {
			/* Don't allow it to be too large */
			eitr = IGB_EITR_INTVL_MASK;
		}
	}
	if (sc->hw.mac.type == e1000_82575)
		eitr |= eitr << 16;
	else
		eitr |= E1000_EITR_CNT_IGNR;
	E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr);
}
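/*
 * A worked example of the EITR math above (a sketch; the exact field
 * layout comes from IGB_EITR_INTVL_SHIFT/MASK in the header): for a
 * requested rate of 8000 interrupts/s on an 82575,
 *
 *	eitr = 1000000000 / 256 / 8000 = 488
 *
 * in 256 ns units, i.e. roughly 125 us between interrupts.  On later
 * MACs the same rate gives 1000000 / 8000 = 125 in 1 us granularity,
 * shifted into the interval field.
 */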
static void
igb_add_intr_rate_sysctl(struct igb_softc *sc, int use,
    const char *name, const char *desc)
{
	int i;

	for (i = 0; i < sc->intr_cnt; ++i) {
		if (sc->intr_data[i].intr_use == use) {
			SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
			    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
			    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW,
			    sc, use, igb_sysctl_intr_rate, "I", desc);
			break;
		}
	}
}

static int
igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	int use = arg2;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, rate, i;
	struct igb_intr_data *intr;

	rate = 0;
	for (i = 0; i < sc->intr_cnt; ++i) {
		intr = &sc->intr_data[i];
		if (intr->intr_use == use) {
			rate = intr->intr_rate;
			break;
		}
	}

	error = sysctl_handle_int(oidp, &rate, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (rate <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	for (i = 0; i < sc->intr_cnt; ++i) {
		intr = &sc->intr_data[i];
		if (intr->intr_use == use && intr->intr_rate != rate) {
			intr->intr_rate = rate;
			if (ifp->if_flags & IFF_RUNNING)
				igb_set_eitr(sc, i, rate);
		}
	}

	ifnet_deserialize_all(ifp);

	return error;
}

static int
igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct igb_tx_ring *txr = &sc->tx_rings[0];
	int error, nsegs;

	nsegs = txr->intr_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (nsegs <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	if (nsegs >= txr->num_tx_desc - IGB_MAX_SCATTER - IGB_TX_RESERVED) {
		error = EINVAL;
	} else {
		int i;

		error = 0;
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			sc->tx_rings[i].intr_nsegs = nsegs;
	}

	ifnet_deserialize_all(ifp);

	return error;
}

static int
igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->rx_rings[0].wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		sc->rx_rings[i].wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

static int
igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->tx_rings[0].wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_rings[i].wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

static void
igb_init_intr(struct igb_softc *sc)
{
	int i;

	igb_set_intr_mask(sc);

	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0)
		igb_init_unshared_intr(sc);

	for (i = 0; i < sc->intr_cnt; ++i)
		igb_set_eitr(sc, i, sc->intr_data[i].intr_rate);
}
sc->hw.mac.type); 4005 } 4006 for (i = 0; i < ivar_max; ++i) 4007 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 4008 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 4009 } else { 4010 uint32_t tmp; 4011 4012 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 4013 ("82575 w/ MSI-X")); 4014 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 4015 tmp |= E1000_CTRL_EXT_IRCA; 4016 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 4017 } 4018 4019 /* 4020 * Map TX/RX interrupts to EICR 4021 */ 4022 switch (sc->hw.mac.type) { 4023 case e1000_82580: 4024 case e1000_i350: 4025 case e1000_i354: 4026 case e1000_vfadapt: 4027 case e1000_vfadapt_i350: 4028 case e1000_i210: 4029 case e1000_i211: 4030 /* RX entries */ 4031 for (i = 0; i < sc->rx_ring_inuse; ++i) { 4032 rxr = &sc->rx_rings[i]; 4033 4034 index = i >> 1; 4035 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 4036 4037 if (i & 1) { 4038 ivar &= 0xff00ffff; 4039 ivar |= 4040 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 4041 } else { 4042 ivar &= 0xffffff00; 4043 ivar |= 4044 (rxr->rx_intr_vec | E1000_IVAR_VALID); 4045 } 4046 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 4047 } 4048 /* TX entries */ 4049 for (i = 0; i < sc->tx_ring_inuse; ++i) { 4050 txr = &sc->tx_rings[i]; 4051 4052 index = i >> 1; 4053 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 4054 4055 if (i & 1) { 4056 ivar &= 0x00ffffff; 4057 ivar |= 4058 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 4059 } else { 4060 ivar &= 0xffff00ff; 4061 ivar |= 4062 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 4063 } 4064 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 4065 } 4066 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 4067 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 4068 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 4069 } 4070 break; 4071 4072 case e1000_82576: 4073 /* RX entries */ 4074 for (i = 0; i < sc->rx_ring_inuse; ++i) { 4075 rxr = &sc->rx_rings[i]; 4076 4077 index = i & 0x7; /* Each IVAR has two entries */ 4078 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 4079 4080 if (i < 8) { 4081 ivar &= 0xffffff00; 4082 ivar |= 4083 (rxr->rx_intr_vec | E1000_IVAR_VALID); 4084 } else { 4085 ivar &= 0xff00ffff; 4086 ivar |= 4087 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 4088 } 4089 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 4090 } 4091 /* TX entries */ 4092 for (i = 0; i < sc->tx_ring_inuse; ++i) { 4093 txr = &sc->tx_rings[i]; 4094 4095 index = i & 0x7; /* Each IVAR has two entries */ 4096 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 4097 4098 if (i < 8) { 4099 ivar &= 0xffff00ff; 4100 ivar |= 4101 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 4102 } else { 4103 ivar &= 0x00ffffff; 4104 ivar |= 4105 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 4106 } 4107 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 4108 } 4109 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 4110 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 4111 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 4112 } 4113 break; 4114 4115 case e1000_82575: 4116 /* 4117 * Enable necessary interrupt bits. 4118 * 4119 * The name of the register is confusing; in addition to 4120 * configuring the first vector of MSI-X, it also configures 4121 * which bits of EICR could be set by the hardware even when 4122 * MSI or line interrupt is used; it thus controls interrupt 4123 * generation. It MUST be configured explicitly; the default 4124 * value mentioned in the datasheet is wrong: RX queue0 and 4125 * TX queue0 are NOT enabled by default. 
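*
* For example, with one RX ring and one TX ring in use, the value
* written below is sc->intr_mask, i.e. E1000_EICR_OTHER |
* E1000_EICR_RX_QUEUE0 | E1000_EICR_TX_QUEUE0, as assembled by
* igb_set_intr_mask().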
4126 */ 4127 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask); 4128 break; 4129 4130 default: 4131 panic("unknown mac type %d\n", sc->hw.mac.type); 4132 } 4133 } 4134 4135 static int 4136 igb_setup_intr(struct igb_softc *sc) 4137 { 4138 int i; 4139 4140 for (i = 0; i < sc->intr_cnt; ++i) { 4141 struct igb_intr_data *intr = &sc->intr_data[i]; 4142 int error; 4143 4144 error = bus_setup_intr_descr(sc->dev, intr->intr_res, 4145 INTR_MPSAFE, intr->intr_func, intr->intr_funcarg, 4146 &intr->intr_hand, intr->intr_serialize, intr->intr_desc); 4147 if (error) { 4148 device_printf(sc->dev, "can't setup %dth intr\n", i); 4149 igb_teardown_intr(sc, i); 4150 return error; 4151 } 4152 } 4153 return 0; 4154 } 4155 4156 static void 4157 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_vec0, int intr_vecmax) 4158 { 4159 if (txr->sc->hw.mac.type == e1000_82575) { 4160 txr->tx_intr_vec = 0; /* unused */ 4161 switch (txr->me) { 4162 case 0: 4163 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0; 4164 break; 4165 case 1: 4166 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1; 4167 break; 4168 case 2: 4169 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2; 4170 break; 4171 case 3: 4172 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3; 4173 break; 4174 default: 4175 panic("unsupported # of TX ring, %d\n", txr->me); 4176 } 4177 } else { 4178 int intr_vec = *intr_vec0; 4179 4180 txr->tx_intr_vec = intr_vec % intr_vecmax; 4181 txr->tx_intr_mask = 1 << txr->tx_intr_vec; 4182 4183 *intr_vec0 = intr_vec + 1; 4184 } 4185 } 4186 4187 static void 4188 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_vec0, int intr_vecmax) 4189 { 4190 if (rxr->sc->hw.mac.type == e1000_82575) { 4191 rxr->rx_intr_vec = 0; /* unused */ 4192 switch (rxr->me) { 4193 case 0: 4194 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0; 4195 break; 4196 case 1: 4197 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1; 4198 break; 4199 case 2: 4200 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2; 4201 break; 4202 case 3: 4203 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3; 4204 break; 4205 default: 4206 panic("unsupported # of RX ring, %d\n", rxr->me); 4207 } 4208 } else { 4209 int intr_vec = *intr_vec0; 4210 4211 rxr->rx_intr_vec = intr_vec % intr_vecmax; 4212 rxr->rx_intr_mask = 1 << rxr->rx_intr_vec; 4213 4214 *intr_vec0 = intr_vec + 1; 4215 } 4216 } 4217 4218 static void 4219 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 4220 { 4221 struct igb_softc *sc = ifp->if_softc; 4222 4223 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz); 4224 } 4225 4226 static void 4227 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4228 { 4229 struct igb_softc *sc = ifp->if_softc; 4230 4231 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz); 4232 } 4233 4234 static int 4235 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4236 { 4237 struct igb_softc *sc = ifp->if_softc; 4238 4239 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt, 4240 slz); 4241 } 4242 4243 #ifdef INVARIANTS 4244 4245 static void 4246 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 4247 boolean_t serialized) 4248 { 4249 struct igb_softc *sc = ifp->if_softc; 4250 4251 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt, 4252 slz, serialized); 4253 } 4254 4255 #endif /* INVARIANTS */ 4256 4257 static void 4258 igb_set_intr_mask(struct igb_softc *sc) 4259 { 4260 int i; 4261 4262 sc->intr_mask = sc->sts_intr_mask; 4263 for (i = 0; i < sc->rx_ring_inuse; ++i) 4264 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask; 4265 for (i = 0; i < 
sc->tx_ring_inuse; ++i) 4266 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask; 4267 if (bootverbose) { 4268 if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n", 4269 sc->intr_mask); 4270 } 4271 } 4272 4273 static int 4274 igb_alloc_intr(struct igb_softc *sc) 4275 { 4276 struct igb_tx_ring *txr; 4277 struct igb_intr_data *intr; 4278 int i, intr_vec, intr_vecmax; 4279 u_int intr_flags; 4280 4281 igb_alloc_msix(sc); 4282 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 4283 igb_set_ring_inuse(sc, FALSE); 4284 goto done; 4285 } 4286 4287 /* 4288 * Reset some settings changed by igb_alloc_msix(). 4289 */ 4290 if (sc->rx_rmap_intr != NULL) { 4291 if_ringmap_free(sc->rx_rmap_intr); 4292 sc->rx_rmap_intr = NULL; 4293 } 4294 if (sc->tx_rmap_intr != NULL) { 4295 if_ringmap_free(sc->tx_rmap_intr); 4296 sc->tx_rmap_intr = NULL; 4297 } 4298 if (sc->intr_data != NULL) { 4299 kfree(sc->intr_data, M_DEVBUF); 4300 sc->intr_data = NULL; 4301 } 4302 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4303 txr = &sc->tx_rings[i]; 4304 txr->tx_intr_vec = 0; 4305 txr->tx_intr_mask = 0; 4306 txr->tx_intr_cpuid = -1; 4307 } 4308 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4309 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4310 4311 rxr->rx_intr_vec = 0; 4312 rxr->rx_intr_mask = 0; 4313 rxr->rx_txr = NULL; 4314 } 4315 4316 sc->intr_cnt = 1; 4317 sc->intr_data = kmalloc(sizeof(struct igb_intr_data), M_DEVBUF, 4318 M_WAITOK | M_ZERO); 4319 intr = &sc->intr_data[0]; 4320 4321 /* 4322 * Allocate MSI/legacy interrupt resource 4323 */ 4324 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable, 4325 &intr->intr_rid, &intr_flags); 4326 4327 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 4328 int unshared; 4329 4330 unshared = device_getenv_int(sc->dev, "irq.unshared", 0); 4331 if (!unshared) { 4332 sc->flags |= IGB_FLAG_SHARED_INTR; 4333 if (bootverbose) 4334 device_printf(sc->dev, "IRQ shared\n"); 4335 } else { 4336 intr_flags &= ~RF_SHAREABLE; 4337 if (bootverbose) 4338 device_printf(sc->dev, "IRQ unshared\n"); 4339 } 4340 } 4341 4342 intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4343 &intr->intr_rid, intr_flags); 4344 if (intr->intr_res == NULL) { 4345 device_printf(sc->dev, "Unable to allocate bus resource: " 4346 "interrupt\n"); 4347 return ENXIO; 4348 } 4349 4350 intr->intr_serialize = &sc->main_serialize; 4351 intr->intr_cpuid = rman_get_cpuid(intr->intr_res); 4352 intr->intr_func = (sc->flags & IGB_FLAG_SHARED_INTR) ? 
4353 igb_intr_shared : igb_intr; 4354 intr->intr_funcarg = sc; 4355 intr->intr_rate = IGB_INTR_RATE; 4356 intr->intr_use = IGB_INTR_USE_RXTX; 4357 4358 sc->tx_rings[0].tx_intr_cpuid = intr->intr_cpuid; 4359 4360 /* 4361 * Setup MSI/legacy interrupt mask 4362 */ 4363 switch (sc->hw.mac.type) { 4364 case e1000_82575: 4365 intr_vecmax = IGB_MAX_TXRXINT_82575; 4366 break; 4367 4368 case e1000_82576: 4369 intr_vecmax = IGB_MAX_TXRXINT_82576; 4370 break; 4371 4372 case e1000_82580: 4373 intr_vecmax = IGB_MAX_TXRXINT_82580; 4374 break; 4375 4376 case e1000_i350: 4377 intr_vecmax = IGB_MAX_TXRXINT_I350; 4378 break; 4379 4380 case e1000_i354: 4381 intr_vecmax = IGB_MAX_TXRXINT_I354; 4382 break; 4383 4384 case e1000_i210: 4385 intr_vecmax = IGB_MAX_TXRXINT_I210; 4386 break; 4387 4388 case e1000_i211: 4389 intr_vecmax = IGB_MAX_TXRXINT_I211; 4390 break; 4391 4392 default: 4393 intr_vecmax = IGB_MIN_TXRXINT; 4394 break; 4395 } 4396 intr_vec = 0; 4397 for (i = 0; i < sc->tx_ring_cnt; ++i) 4398 igb_set_txintr_mask(&sc->tx_rings[i], &intr_vec, intr_vecmax); 4399 for (i = 0; i < sc->rx_ring_cnt; ++i) 4400 igb_set_rxintr_mask(&sc->rx_rings[i], &intr_vec, intr_vecmax); 4401 sc->sts_intr_mask = E1000_EICR_OTHER; 4402 4403 igb_set_ring_inuse(sc, FALSE); 4404 KKASSERT(sc->rx_ring_inuse <= IGB_MIN_RING_RSS); 4405 if (sc->rx_ring_inuse == IGB_MIN_RING_RSS) { 4406 /* 4407 * Allocate RX ring map for RSS setup. 4408 */ 4409 sc->rx_rmap_intr = if_ringmap_alloc(sc->dev, 4410 IGB_MIN_RING_RSS, IGB_MIN_RING_RSS); 4411 KASSERT(if_ringmap_count(sc->rx_rmap_intr) == 4412 sc->rx_ring_inuse, ("RX ring inuse mismatch")); 4413 } 4414 done: 4415 igb_set_intr_mask(sc); 4416 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4417 txr = &sc->tx_rings[i]; 4418 if (txr->tx_intr_cpuid < 0) 4419 txr->tx_intr_cpuid = 0; 4420 } 4421 return 0; 4422 } 4423 4424 static void 4425 igb_free_intr(struct igb_softc *sc) 4426 { 4427 if (sc->intr_data == NULL) 4428 return; 4429 4430 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 4431 struct igb_intr_data *intr = &sc->intr_data[0]; 4432 4433 KKASSERT(sc->intr_cnt == 1); 4434 if (intr->intr_res != NULL) { 4435 bus_release_resource(sc->dev, SYS_RES_IRQ, 4436 intr->intr_rid, intr->intr_res); 4437 } 4438 if (sc->intr_type == PCI_INTR_TYPE_MSI) 4439 pci_release_msi(sc->dev); 4440 4441 kfree(sc->intr_data, M_DEVBUF); 4442 } else { 4443 igb_free_msix(sc, TRUE); 4444 } 4445 } 4446 4447 static void 4448 igb_teardown_intr(struct igb_softc *sc, int intr_cnt) 4449 { 4450 int i; 4451 4452 if (sc->intr_data == NULL) 4453 return; 4454 4455 for (i = 0; i < intr_cnt; ++i) { 4456 struct igb_intr_data *intr = &sc->intr_data[i]; 4457 4458 bus_teardown_intr(sc->dev, intr->intr_res, intr->intr_hand); 4459 } 4460 } 4461 4462 static void 4463 igb_alloc_msix(struct igb_softc *sc) 4464 { 4465 int msix_enable, msix_cnt, msix_ring, alloc_cnt; 4466 int i, x, error; 4467 int ring_cnt, ring_cntmax; 4468 struct igb_intr_data *intr; 4469 boolean_t setup = FALSE; 4470 4471 /* 4472 * Don't enable MSI-X on 82575, see: 4473 * 82575 specification update errata #25 4474 */ 4475 if (sc->hw.mac.type == e1000_82575) 4476 return; 4477 4478 /* Don't enable MSI-X on VF */ 4479 if (sc->vf_ifp) 4480 return; 4481 4482 msix_enable = device_getenv_int(sc->dev, "msix.enable", 4483 igb_msix_enable); 4484 if (!msix_enable) 4485 return; 4486 4487 msix_cnt = pci_msix_count(sc->dev); 4488 #ifdef IGB_MSIX_DEBUG 4489 msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt); 4490 #endif 4491 if (msix_cnt <= 1) { 4492 /* One MSI-X model does not make sense. 
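* A single vector has no benefit over plain MSI, so bail out here
* and let igb_alloc_intr() fall back to MSI or a legacy interrupt.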
*/ 4493 return; 4494 } 4495 if (bootverbose) 4496 device_printf(sc->dev, "MSI-X count %d\n", msix_cnt); 4497 msix_ring = msix_cnt - 1; /* -1 for status */ 4498 4499 /* 4500 * Configure # of RX/TX rings usable by MSI-X. 4501 */ 4502 igb_get_rxring_cnt(sc, &ring_cnt, &ring_cntmax); 4503 if (ring_cntmax > msix_ring) 4504 ring_cntmax = msix_ring; 4505 sc->rx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax); 4506 4507 igb_get_txring_cnt(sc, &ring_cnt, &ring_cntmax); 4508 if (ring_cntmax > msix_ring) 4509 ring_cntmax = msix_ring; 4510 sc->tx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax); 4511 4512 if_ringmap_match(sc->dev, sc->rx_rmap_intr, sc->tx_rmap_intr); 4513 sc->rx_ring_msix = if_ringmap_count(sc->rx_rmap_intr); 4514 KASSERT(sc->rx_ring_msix <= sc->rx_ring_cnt, 4515 ("total RX ring count %d, MSI-X RX ring count %d", 4516 sc->rx_ring_cnt, sc->rx_ring_msix)); 4517 sc->tx_ring_msix = if_ringmap_count(sc->tx_rmap_intr); 4518 KASSERT(sc->tx_ring_msix <= sc->tx_ring_cnt, 4519 ("total TX ring count %d, MSI-X TX ring count %d", 4520 sc->tx_ring_cnt, sc->tx_ring_msix)); 4521 4522 /* 4523 * Aggregate TX/RX MSI-X 4524 */ 4525 ring_cntmax = sc->rx_ring_msix; 4526 if (ring_cntmax < sc->tx_ring_msix) 4527 ring_cntmax = sc->tx_ring_msix; 4528 KASSERT(ring_cntmax <= msix_ring, 4529 ("invalid ring count max %d, MSI-X count for rings %d", 4530 ring_cntmax, msix_ring)); 4531 4532 alloc_cnt = ring_cntmax + 1; /* +1 for status */ 4533 if (bootverbose) { 4534 device_printf(sc->dev, "MSI-X alloc %d, " 4535 "RX ring %d, TX ring %d\n", alloc_cnt, 4536 sc->rx_ring_msix, sc->tx_ring_msix); 4537 } 4538 4539 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR); 4540 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 4541 &sc->msix_mem_rid, RF_ACTIVE); 4542 if (sc->msix_mem_res == NULL) { 4543 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR_ALT); 4544 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 4545 &sc->msix_mem_rid, RF_ACTIVE); 4546 if (sc->msix_mem_res == NULL) { 4547 device_printf(sc->dev, "Unable to map MSI-X table\n"); 4548 return; 4549 } 4550 } 4551 4552 sc->intr_cnt = alloc_cnt; 4553 sc->intr_data = kmalloc(sizeof(struct igb_intr_data) * sc->intr_cnt, 4554 M_DEVBUF, M_WAITOK | M_ZERO); 4555 for (x = 0; x < sc->intr_cnt; ++x) { 4556 intr = &sc->intr_data[x]; 4557 intr->intr_rid = -1; 4558 intr->intr_rate = IGB_INTR_RATE; 4559 } 4560 4561 x = 0; 4562 for (i = 0; i < sc->rx_ring_msix; ++i) { 4563 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4564 struct igb_tx_ring *txr = NULL; 4565 int cpuid, j; 4566 4567 KKASSERT(x < sc->intr_cnt); 4568 rxr->rx_intr_vec = x; 4569 rxr->rx_intr_mask = 1 << rxr->rx_intr_vec; 4570 4571 cpuid = if_ringmap_cpumap(sc->rx_rmap_intr, i); 4572 4573 /* 4574 * Try finding TX ring to piggyback. 
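* A TX ring whose interrupt CPU matches this RX ring's CPU can
* share the RX ring's MSI-X vector; such a pair is then serviced
* by igb_msix_rxtx() instead of separate igb_msix_rx() and
* igb_msix_tx() handlers.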
4575 */ 4576 for (j = 0; j < sc->tx_ring_msix; ++j) { 4577 if (cpuid == 4578 if_ringmap_cpumap(sc->tx_rmap_intr, j)) { 4579 txr = &sc->tx_rings[j]; 4580 KKASSERT(txr->tx_intr_cpuid < 0); 4581 break; 4582 } 4583 } 4584 rxr->rx_txr = txr; 4585 4586 intr = &sc->intr_data[x++]; 4587 intr->intr_serialize = &rxr->rx_serialize; 4588 intr->intr_cpuid = cpuid; 4589 KKASSERT(intr->intr_cpuid < netisr_ncpus); 4590 intr->intr_funcarg = rxr; 4591 if (txr != NULL) { 4592 intr->intr_func = igb_msix_rxtx; 4593 intr->intr_use = IGB_INTR_USE_RXTX; 4594 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), 4595 "%s rx%dtx%d", device_get_nameunit(sc->dev), 4596 i, txr->me); 4597 4598 txr->tx_intr_vec = rxr->rx_intr_vec; 4599 txr->tx_intr_mask = rxr->rx_intr_mask; 4600 txr->tx_intr_cpuid = intr->intr_cpuid; 4601 } else { 4602 intr->intr_func = igb_msix_rx; 4603 intr->intr_rate = IGB_MSIX_RX_RATE; 4604 intr->intr_use = IGB_INTR_USE_RX; 4605 4606 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), 4607 "%s rx%d", device_get_nameunit(sc->dev), i); 4608 } 4609 intr->intr_desc = intr->intr_desc0; 4610 } 4611 4612 for (i = 0; i < sc->tx_ring_msix; ++i) { 4613 struct igb_tx_ring *txr = &sc->tx_rings[i]; 4614 4615 if (txr->tx_intr_cpuid >= 0) { 4616 /* Piggybacked by RX ring. */ 4617 continue; 4618 } 4619 4620 KKASSERT(x < sc->intr_cnt); 4621 txr->tx_intr_vec = x; 4622 txr->tx_intr_mask = 1 << txr->tx_intr_vec; 4623 4624 intr = &sc->intr_data[x++]; 4625 intr->intr_serialize = &txr->tx_serialize; 4626 intr->intr_func = igb_msix_tx; 4627 intr->intr_funcarg = txr; 4628 intr->intr_rate = IGB_MSIX_TX_RATE; 4629 intr->intr_use = IGB_INTR_USE_TX; 4630 4631 intr->intr_cpuid = if_ringmap_cpumap(sc->tx_rmap_intr, i); 4632 KKASSERT(intr->intr_cpuid < netisr_ncpus); 4633 txr->tx_intr_cpuid = intr->intr_cpuid; 4634 4635 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s tx%d", 4636 device_get_nameunit(sc->dev), i); 4637 intr->intr_desc = intr->intr_desc0; 4638 } 4639 4640 /* 4641 * Link status 4642 */ 4643 KKASSERT(x < sc->intr_cnt); 4644 sc->sts_msix_vec = x; 4645 sc->sts_intr_mask = 1 << sc->sts_msix_vec; 4646 4647 intr = &sc->intr_data[x++]; 4648 intr->intr_serialize = &sc->main_serialize; 4649 intr->intr_func = igb_msix_status; 4650 intr->intr_funcarg = sc; 4651 intr->intr_cpuid = 0; 4652 intr->intr_use = IGB_INTR_USE_STATUS; 4653 4654 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s sts", 4655 device_get_nameunit(sc->dev)); 4656 intr->intr_desc = intr->intr_desc0; 4657 4658 KKASSERT(x == sc->intr_cnt); 4659 4660 error = pci_setup_msix(sc->dev); 4661 if (error) { 4662 device_printf(sc->dev, "Setup MSI-X failed\n"); 4663 goto back; 4664 } 4665 setup = TRUE; 4666 4667 for (i = 0; i < sc->intr_cnt; ++i) { 4668 intr = &sc->intr_data[i]; 4669 4670 error = pci_alloc_msix_vector(sc->dev, i, &intr->intr_rid, 4671 intr->intr_cpuid); 4672 if (error) { 4673 device_printf(sc->dev, 4674 "Unable to allocate MSI-X %d on cpu%d\n", i, 4675 intr->intr_cpuid); 4676 goto back; 4677 } 4678 4679 intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4680 &intr->intr_rid, RF_ACTIVE); 4681 if (intr->intr_res == NULL) { 4682 device_printf(sc->dev, 4683 "Unable to allocate MSI-X %d resource\n", i); 4684 error = ENOMEM; 4685 goto back; 4686 } 4687 } 4688 4689 pci_enable_msix(sc->dev); 4690 sc->intr_type = PCI_INTR_TYPE_MSIX; 4691 back: 4692 if (error) 4693 igb_free_msix(sc, setup); 4694 } 4695 4696 static void 4697 igb_free_msix(struct igb_softc *sc, boolean_t setup) 4698 { 4699 int i; 4700 4701 KKASSERT(sc->intr_cnt > 1); 4702 
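/* Release each vector's IRQ resource and the MSI-X vector itself. */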
4703 for (i = 0; i < sc->intr_cnt; ++i) { 4704 struct igb_intr_data *intr = &sc->intr_data[i]; 4705 4706 if (intr->intr_res != NULL) { 4707 bus_release_resource(sc->dev, SYS_RES_IRQ, 4708 intr->intr_rid, intr->intr_res); 4709 } 4710 if (intr->intr_rid >= 0) 4711 pci_release_msix_vector(sc->dev, intr->intr_rid); 4712 } 4713 if (setup) 4714 pci_teardown_msix(sc->dev); 4715 4716 sc->intr_cnt = 0; 4717 kfree(sc->intr_data, M_DEVBUF); 4718 sc->intr_data = NULL; 4719 } 4720 4721 static void 4722 igb_msix_rx(void *arg) 4723 { 4724 struct igb_rx_ring *rxr = arg; 4725 4726 ASSERT_SERIALIZED(&rxr->rx_serialize); 4727 igb_rxeof(rxr, -1); 4728 4729 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask); 4730 } 4731 4732 static void 4733 igb_msix_tx(void *arg) 4734 { 4735 struct igb_tx_ring *txr = arg; 4736 4737 ASSERT_SERIALIZED(&txr->tx_serialize); 4738 4739 igb_tx_intr(txr, *(txr->tx_hdr)); 4740 E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask); 4741 } 4742 4743 static void 4744 igb_msix_status(void *arg) 4745 { 4746 struct igb_softc *sc = arg; 4747 uint32_t icr; 4748 4749 ASSERT_SERIALIZED(&sc->main_serialize); 4750 4751 icr = E1000_READ_REG(&sc->hw, E1000_ICR); 4752 if (icr & E1000_ICR_LSC) { 4753 sc->hw.mac.get_link_status = 1; 4754 igb_update_link_status(sc); 4755 } 4756 4757 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask); 4758 } 4759 4760 static void 4761 igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling) 4762 { 4763 sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling); 4764 sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling); 4765 if (bootverbose) { 4766 if_printf(&sc->arpcom.ac_if, "RX rings %d/%d, TX rings %d/%d\n", 4767 sc->rx_ring_inuse, sc->rx_ring_cnt, 4768 sc->tx_ring_inuse, sc->tx_ring_cnt); 4769 } 4770 } 4771 4772 static int 4773 igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling) 4774 { 4775 if (!IGB_ENABLE_HWRSS(sc)) 4776 return 1; 4777 4778 if (polling) 4779 return sc->rx_ring_cnt; 4780 else if (sc->intr_type != PCI_INTR_TYPE_MSIX) 4781 return IGB_MIN_RING_RSS; 4782 else 4783 return sc->rx_ring_msix; 4784 } 4785 4786 static int 4787 igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling) 4788 { 4789 if (!IGB_ENABLE_HWTSS(sc)) 4790 return 1; 4791 4792 if (polling) 4793 return sc->tx_ring_cnt; 4794 else if (sc->intr_type != PCI_INTR_TYPE_MSIX) 4795 return IGB_MIN_RING; 4796 else 4797 return sc->tx_ring_msix; 4798 } 4799 4800 static int 4801 igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp) 4802 { 4803 int hoff, iphlen, thoff; 4804 struct mbuf *m; 4805 4806 m = *mp; 4807 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 4808 4809 iphlen = m->m_pkthdr.csum_iphlen; 4810 thoff = m->m_pkthdr.csum_thlen; 4811 hoff = m->m_pkthdr.csum_lhlen; 4812 4813 KASSERT(iphlen > 0, ("invalid ip hlen")); 4814 KASSERT(thoff > 0, ("invalid tcp hlen")); 4815 KASSERT(hoff > 0, ("invalid ether hlen")); 4816 4817 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 4818 m = m_pullup(m, hoff + iphlen + thoff); 4819 if (m == NULL) { 4820 *mp = NULL; 4821 return ENOBUFS; 4822 } 4823 *mp = m; 4824 } 4825 if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) { 4826 struct ip *ip; 4827 4828 ip = mtodoff(m, struct ip *, hoff); 4829 ip->ip_len = 0; 4830 } 4831 4832 return 0; 4833 } 4834 4835 static void 4836 igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen) 4837 { 4838 struct e1000_adv_tx_context_desc *TXD; 4839 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 4840 int hoff, ctxd, iphlen, thoff; 4841 4842 iphlen = 
m->m_pkthdr.csum_iphlen; 4843 thoff = m->m_pkthdr.csum_thlen; 4844 hoff = m->m_pkthdr.csum_lhlen; 4845 4846 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 4847 4848 ctxd = txr->next_avail_desc; 4849 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 4850 4851 if (m->m_flags & M_VLANTAG) { 4852 uint16_t vlantag; 4853 4854 vlantag = htole16(m->m_pkthdr.ether_vlantag); 4855 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 4856 } 4857 4858 vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT); 4859 vlan_macip_lens |= iphlen; 4860 4861 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 4862 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 4863 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 4864 4865 mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT); 4866 mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT); 4867 4868 /* 4869 * 82575 needs the TX context index added; the queue 4870 * index is used as TX context index here. 4871 */ 4872 if (txr->sc->hw.mac.type == e1000_82575) 4873 mss_l4len_idx |= txr->me << 4; 4874 4875 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 4876 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 4877 TXD->seqnum_seed = htole32(0); 4878 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 4879 4880 /* We've consumed the first desc, adjust counters */ 4881 if (++ctxd == txr->num_tx_desc) 4882 ctxd = 0; 4883 txr->next_avail_desc = ctxd; 4884 --txr->tx_avail; 4885 4886 *hlen = hoff + iphlen + thoff; 4887 } 4888 4889 static void 4890 igb_setup_serialize(struct igb_softc *sc) 4891 { 4892 int i = 0, j; 4893 4894 /* Main + RX + TX */ 4895 sc->serialize_cnt = 1 + sc->rx_ring_cnt + sc->tx_ring_cnt; 4896 sc->serializes = 4897 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *), 4898 M_DEVBUF, M_WAITOK | M_ZERO); 4899 4900 /* 4901 * Setup serializes 4902 * 4903 * NOTE: Order is critical 4904 */ 4905 4906 KKASSERT(i < sc->serialize_cnt); 4907 sc->serializes[i++] = &sc->main_serialize; 4908 4909 for (j = 0; j < sc->rx_ring_cnt; ++j) { 4910 KKASSERT(i < sc->serialize_cnt); 4911 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize; 4912 } 4913 4914 for (j = 0; j < sc->tx_ring_cnt; ++j) { 4915 KKASSERT(i < sc->serialize_cnt); 4916 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize; 4917 } 4918 4919 KKASSERT(i == sc->serialize_cnt); 4920 } 4921 4922 static void 4923 igb_msix_rxtx(void *arg) 4924 { 4925 struct igb_rx_ring *rxr = arg; 4926 struct igb_tx_ring *txr; 4927 int hdr; 4928 4929 ASSERT_SERIALIZED(&rxr->rx_serialize); 4930 4931 igb_rxeof(rxr, -1); 4932 4933 /* 4934 * NOTE: 4935 * Since next_to_clean is only changed by igb_txeof(), 4936 * which is called only in interrupt handler, the 4937 * check w/o holding tx serializer is MPSAFE. 
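* The TX serializer is entered below only when the cached header
* index (*tx_hdr) differs from next_to_clean, i.e. only when
* there is completed TX work to reclaim.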
4938 */
4939 txr = rxr->rx_txr;
4940 hdr = *(txr->tx_hdr);
4941 if (hdr != txr->next_to_clean) {
4942 lwkt_serialize_enter(&txr->tx_serialize);
4943 igb_tx_intr(txr, hdr);
4944 lwkt_serialize_exit(&txr->tx_serialize);
4945 }
4946
4947 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
4948 }
4949
4950 static void
4951 igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling)
4952 {
4953 if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
4954 sc->timer_cpuid = 0; /* XXX fixed */
4955 else
4956 sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res);
4957 }
4958
4959 static void
4960 igb_init_dmac(struct igb_softc *sc, uint32_t pba)
4961 {
4962 struct e1000_hw *hw = &sc->hw;
4963 uint32_t reg;
4964
4965 if (hw->mac.type == e1000_i211)
4966 return;
4967
4968 if (hw->mac.type > e1000_82580) {
4969 uint32_t dmac;
4970 uint16_t hwm;
4971
4972 if (sc->dma_coalesce == 0) { /* Disabling it */
4973 reg = ~E1000_DMACR_DMAC_EN;
4974 E1000_WRITE_REG(hw, E1000_DMACR, reg);
4975 return;
4976 } else {
4977 if_printf(&sc->arpcom.ac_if,
4978 "DMA Coalescing enabled\n");
4979 }
4980
4981 /* Set starting threshold */
4982 E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
4983
4984 hwm = 64 * pba - sc->max_frame_size / 16;
4985 if (hwm < 64 * (pba - 6))
4986 hwm = 64 * (pba - 6);
4987 reg = E1000_READ_REG(hw, E1000_FCRTC);
4988 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
4989 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
4990 & E1000_FCRTC_RTH_COAL_MASK);
4991 E1000_WRITE_REG(hw, E1000_FCRTC, reg);
4992
4993 dmac = pba - sc->max_frame_size / 512;
4994 if (dmac < pba - 10)
4995 dmac = pba - 10;
4996 reg = E1000_READ_REG(hw, E1000_DMACR);
4997 reg &= ~E1000_DMACR_DMACTHR_MASK;
4998 reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT)
4999 & E1000_DMACR_DMACTHR_MASK);
5000
5001 /* Transition to L0s or L1 if available. */
5002 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
5003
5004 /*
5005 * Check whether the link is a 2.5Gb backplane connection
5006 * before configuring the watchdog timer: the timer value
5007 * is a msec count expressed in 12.8usec intervals on a
5008 * 2.5Gb connection and in 32usec intervals on any other
5009 * connection.
5010 */
5011 if (hw->mac.type == e1000_i354) {
5012 int status = E1000_READ_REG(hw, E1000_STATUS);
5013
5014 if ((status & E1000_STATUS_2P5_SKU) &&
5015 !(status & E1000_STATUS_2P5_SKU_OVER))
5016 reg |= ((sc->dma_coalesce * 5) >> 6);
5017 else
5018 reg |= (sc->dma_coalesce >> 5);
5019 } else {
5020 reg |= (sc->dma_coalesce >> 5);
5021 }
5022
5023 E1000_WRITE_REG(hw, E1000_DMACR, reg);
5024
5025 E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
5026
5027 /* Set the interval before transition */
5028 reg = E1000_READ_REG(hw, E1000_DMCTLX);
5029 if (hw->mac.type == e1000_i350)
5030 reg |= IGB_DMCTLX_DCFLUSH_DIS;
5031 /*
5032 * On a 2.5Gb connection the TTLX unit is 0.4 usec, so the
5033 * same 4 usec delay takes 4 / 0.4 = 10 = 0xA units.
5034 */ 5035 if (hw->mac.type == e1000_i354) { 5036 int status = E1000_READ_REG(hw, E1000_STATUS); 5037 5038 if ((status & E1000_STATUS_2P5_SKU) && 5039 !(status & E1000_STATUS_2P5_SKU_OVER)) 5040 reg |= 0xA; 5041 else 5042 reg |= 0x4; 5043 } else { 5044 reg |= 0x4; 5045 } 5046 E1000_WRITE_REG(hw, E1000_DMCTLX, reg); 5047 5048 /* Free space in tx packet buffer to wake from DMA coal */ 5049 E1000_WRITE_REG(hw, E1000_DMCTXTH, 5050 (IGB_TXPBSIZE - (2 * sc->max_frame_size)) >> 6); 5051 5052 /* Make low power state decision controlled by DMA coal */ 5053 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 5054 reg &= ~E1000_PCIEMISC_LX_DECISION; 5055 E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); 5056 } else if (hw->mac.type == e1000_82580) { 5057 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 5058 E1000_WRITE_REG(hw, E1000_PCIEMISC, 5059 reg & ~E1000_PCIEMISC_LX_DECISION); 5060 E1000_WRITE_REG(hw, E1000_DMACR, 0); 5061 } 5062 } 5063 5064 static void 5065 igb_reg_dump(struct igb_softc *sc) 5066 { 5067 device_t dev = sc->dev; 5068 int col = 0; 5069 5070 #define DUMPREG(regno) \ 5071 kprintf(" %13s=%08x", #regno + 6, E1000_READ_REG(&sc->hw, regno));\ 5072 if (++col == 3) { \ 5073 kprintf("\n"); \ 5074 col = 0; \ 5075 } \ 5076 5077 device_printf(dev, "REGISTER DUMP\n"); 5078 DUMPREG(E1000_CTRL); 5079 DUMPREG(E1000_STATUS); 5080 DUMPREG(E1000_EECD); 5081 DUMPREG(E1000_EERD); 5082 DUMPREG(E1000_CTRL_EXT); 5083 DUMPREG(E1000_FLA); 5084 DUMPREG(E1000_MDIC); 5085 DUMPREG(E1000_SCTL); 5086 DUMPREG(E1000_FCAL); 5087 DUMPREG(E1000_FCAH); 5088 DUMPREG(E1000_FCT); 5089 DUMPREG(E1000_CONNSW); 5090 DUMPREG(E1000_VET); 5091 DUMPREG(E1000_ICR); 5092 DUMPREG(E1000_ITR); 5093 DUMPREG(E1000_IMS); 5094 DUMPREG(E1000_IVAR); 5095 DUMPREG(E1000_SVCR); 5096 DUMPREG(E1000_SVT); 5097 DUMPREG(E1000_LPIC); 5098 DUMPREG(E1000_RCTL); 5099 DUMPREG(E1000_FCTTV); 5100 DUMPREG(E1000_TXCW); 5101 DUMPREG(E1000_RXCW); 5102 DUMPREG(E1000_EIMS); 5103 DUMPREG(E1000_EIAC); 5104 DUMPREG(E1000_EIAM); 5105 DUMPREG(E1000_GPIE); 5106 DUMPREG(E1000_IVAR0); 5107 DUMPREG(E1000_IVAR_MISC); 5108 DUMPREG(E1000_TCTL); 5109 DUMPREG(E1000_TCTL_EXT); 5110 DUMPREG(E1000_TIPG); 5111 DUMPREG(E1000_TBT); 5112 DUMPREG(E1000_AIT); 5113 DUMPREG(E1000_LEDCTL); 5114 DUMPREG(E1000_EXTCNF_CTRL); 5115 DUMPREG(E1000_EXTCNF_SIZE); 5116 DUMPREG(E1000_PHY_CTRL); 5117 DUMPREG(E1000_PBA); 5118 DUMPREG(E1000_PBS); 5119 DUMPREG(E1000_PBECCSTS); 5120 DUMPREG(E1000_EEMNGCTL); 5121 DUMPREG(E1000_EEARBC); 5122 DUMPREG(E1000_FLASHT); 5123 DUMPREG(E1000_EEARBC_I210); 5124 DUMPREG(E1000_EEWR); 5125 DUMPREG(E1000_FLSWCTL); 5126 DUMPREG(E1000_FLSWDATA); 5127 DUMPREG(E1000_FLSWCNT); 5128 DUMPREG(E1000_FLOP); 5129 DUMPREG(E1000_I2CCMD); 5130 DUMPREG(E1000_I2CPARAMS); 5131 DUMPREG(E1000_WDSTP); 5132 DUMPREG(E1000_SWDSTS); 5133 DUMPREG(E1000_FRTIMER); 5134 DUMPREG(E1000_TCPTIMER); 5135 DUMPREG(E1000_VPDDIAG); 5136 DUMPREG(E1000_IMS_V2); 5137 DUMPREG(E1000_IAM_V2); 5138 DUMPREG(E1000_ERT); 5139 DUMPREG(E1000_FCRTL); 5140 DUMPREG(E1000_FCRTH); 5141 DUMPREG(E1000_PSRCTL); 5142 DUMPREG(E1000_RDFH); 5143 DUMPREG(E1000_RDFT); 5144 DUMPREG(E1000_RDFHS); 5145 DUMPREG(E1000_RDFTS); 5146 DUMPREG(E1000_RDFPC); 5147 DUMPREG(E1000_PBRTH); 5148 DUMPREG(E1000_FCRTV); 5149 DUMPREG(E1000_RDPUMB); 5150 DUMPREG(E1000_RDPUAD); 5151 DUMPREG(E1000_RDPUWD); 5152 DUMPREG(E1000_RDPURD); 5153 DUMPREG(E1000_RDPUCTL); 5154 DUMPREG(E1000_PBDIAG); 5155 DUMPREG(E1000_RXPBS); 5156 DUMPREG(E1000_IRPBS); 5157 DUMPREG(E1000_PBRWAC); 5158 DUMPREG(E1000_RDTR); 5159 DUMPREG(E1000_RADV); 5160 DUMPREG(E1000_SRWR); 5161 
DUMPREG(E1000_I210_FLMNGCTL); 5162 DUMPREG(E1000_I210_FLMNGDATA); 5163 DUMPREG(E1000_I210_FLMNGCNT); 5164 DUMPREG(E1000_I210_FLSWCTL); 5165 DUMPREG(E1000_I210_FLSWDATA); 5166 DUMPREG(E1000_I210_FLSWCNT); 5167 DUMPREG(E1000_I210_FLA); 5168 DUMPREG(E1000_INVM_SIZE); 5169 DUMPREG(E1000_I210_TQAVCTRL); 5170 DUMPREG(E1000_RSRPD); 5171 DUMPREG(E1000_RAID); 5172 DUMPREG(E1000_TXDMAC); 5173 DUMPREG(E1000_KABGTXD); 5174 DUMPREG(E1000_PBSLAC); 5175 DUMPREG(E1000_TXPBS); 5176 DUMPREG(E1000_ITPBS); 5177 DUMPREG(E1000_TDFH); 5178 DUMPREG(E1000_TDFT); 5179 DUMPREG(E1000_TDFHS); 5180 DUMPREG(E1000_TDFTS); 5181 DUMPREG(E1000_TDFPC); 5182 DUMPREG(E1000_TDPUMB); 5183 DUMPREG(E1000_TDPUAD); 5184 DUMPREG(E1000_TDPUWD); 5185 DUMPREG(E1000_TDPURD); 5186 DUMPREG(E1000_TDPUCTL); 5187 DUMPREG(E1000_DTXCTL); 5188 DUMPREG(E1000_DTXTCPFLGL); 5189 DUMPREG(E1000_DTXTCPFLGH); 5190 DUMPREG(E1000_DTXMXSZRQ); 5191 DUMPREG(E1000_TIDV); 5192 DUMPREG(E1000_TADV); 5193 DUMPREG(E1000_TSPMT); 5194 DUMPREG(E1000_VFGPRC); 5195 DUMPREG(E1000_VFGORC); 5196 DUMPREG(E1000_VFMPRC); 5197 DUMPREG(E1000_VFGPTC); 5198 DUMPREG(E1000_VFGOTC); 5199 DUMPREG(E1000_VFGOTLBC); 5200 DUMPREG(E1000_VFGPTLBC); 5201 DUMPREG(E1000_VFGORLBC); 5202 DUMPREG(E1000_VFGPRLBC); 5203 DUMPREG(E1000_LSECTXCAP); 5204 DUMPREG(E1000_LSECRXCAP); 5205 DUMPREG(E1000_LSECTXCTRL); 5206 DUMPREG(E1000_LSECRXCTRL); 5207 DUMPREG(E1000_LSECTXSCL); 5208 DUMPREG(E1000_LSECTXSCH); 5209 DUMPREG(E1000_LSECTXSA); 5210 DUMPREG(E1000_LSECTXPN0); 5211 DUMPREG(E1000_LSECTXPN1); 5212 DUMPREG(E1000_LSECRXSCL); 5213 DUMPREG(E1000_LSECRXSCH); 5214 DUMPREG(E1000_IPSCTRL); 5215 DUMPREG(E1000_IPSRXCMD); 5216 DUMPREG(E1000_IPSRXIDX); 5217 DUMPREG(E1000_IPSRXSALT); 5218 DUMPREG(E1000_IPSRXSPI); 5219 DUMPREG(E1000_IPSTXSALT); 5220 DUMPREG(E1000_IPSTXIDX); 5221 DUMPREG(E1000_PCS_CFG0); 5222 DUMPREG(E1000_PCS_LCTL); 5223 DUMPREG(E1000_PCS_LSTAT); 5224 DUMPREG(E1000_PCS_ANADV); 5225 DUMPREG(E1000_PCS_LPAB); 5226 DUMPREG(E1000_PCS_NPTX); 5227 DUMPREG(E1000_PCS_LPABNP); 5228 DUMPREG(E1000_RXCSUM); 5229 DUMPREG(E1000_RLPML); 5230 DUMPREG(E1000_RFCTL); 5231 DUMPREG(E1000_MTA); 5232 DUMPREG(E1000_RA); 5233 DUMPREG(E1000_RA2); 5234 DUMPREG(E1000_VFTA); 5235 DUMPREG(E1000_VT_CTL); 5236 DUMPREG(E1000_CIAA); 5237 DUMPREG(E1000_CIAD); 5238 DUMPREG(E1000_VFQA0); 5239 DUMPREG(E1000_VFQA1); 5240 DUMPREG(E1000_WUC); 5241 DUMPREG(E1000_WUFC); 5242 DUMPREG(E1000_WUS); 5243 DUMPREG(E1000_MANC); 5244 DUMPREG(E1000_IPAV); 5245 DUMPREG(E1000_IP4AT); 5246 DUMPREG(E1000_IP6AT); 5247 DUMPREG(E1000_WUPL); 5248 DUMPREG(E1000_WUPM); 5249 DUMPREG(E1000_PBACL); 5250 DUMPREG(E1000_FFLT); 5251 DUMPREG(E1000_HOST_IF); 5252 DUMPREG(E1000_HIBBA); 5253 DUMPREG(E1000_KMRNCTRLSTA); 5254 DUMPREG(E1000_MANC2H); 5255 DUMPREG(E1000_CCMCTL); 5256 DUMPREG(E1000_GIOCTL); 5257 DUMPREG(E1000_SCCTL); 5258 5259 #define E1000_WCS 0x558C 5260 DUMPREG(E1000_WCS); 5261 #define E1000_GCR_EXT 0x586C 5262 DUMPREG(E1000_GCR_EXT); 5263 DUMPREG(E1000_GCR); 5264 DUMPREG(E1000_GCR2); 5265 DUMPREG(E1000_FACTPS); 5266 DUMPREG(E1000_DCA_ID); 5267 DUMPREG(E1000_DCA_CTRL); 5268 DUMPREG(E1000_UFUSE); 5269 DUMPREG(E1000_FFLT_DBG); 5270 DUMPREG(E1000_HICR); 5271 DUMPREG(E1000_FWSTS); 5272 DUMPREG(E1000_CPUVEC); 5273 DUMPREG(E1000_MRQC); 5274 DUMPREG(E1000_SWPBS); 5275 DUMPREG(E1000_MBVFICR); 5276 DUMPREG(E1000_MBVFIMR); 5277 DUMPREG(E1000_VFLRE); 5278 DUMPREG(E1000_VFRE); 5279 DUMPREG(E1000_VFTE); 5280 DUMPREG(E1000_QDE); 5281 DUMPREG(E1000_DTXSWC); 5282 DUMPREG(E1000_WVBR); 5283 DUMPREG(E1000_RPLOLR); 5284 DUMPREG(E1000_UTA); 5285 DUMPREG(E1000_IOVTCL); 5286 
DUMPREG(E1000_VMRCTL); 5287 DUMPREG(E1000_VMRVLAN); 5288 DUMPREG(E1000_VMRVM); 5289 DUMPREG(E1000_LVMMC); 5290 DUMPREG(E1000_TXSWC); 5291 DUMPREG(E1000_SCCRL); 5292 DUMPREG(E1000_BSCTRH); 5293 DUMPREG(E1000_MSCTRH); 5294 DUMPREG(E1000_RXSTMPL); 5295 DUMPREG(E1000_RXSTMPH); 5296 DUMPREG(E1000_RXSATRL); 5297 DUMPREG(E1000_RXSATRH); 5298 DUMPREG(E1000_TXSTMPL); 5299 DUMPREG(E1000_TXSTMPH); 5300 DUMPREG(E1000_TIMINCA); 5301 DUMPREG(E1000_TIMADJL); 5302 DUMPREG(E1000_TIMADJH); 5303 DUMPREG(E1000_TSAUXC); 5304 DUMPREG(E1000_SYSSTMPL); 5305 DUMPREG(E1000_SYSSTMPH); 5306 DUMPREG(E1000_PLTSTMPL); 5307 DUMPREG(E1000_PLTSTMPH); 5308 DUMPREG(E1000_RXMTRL); 5309 DUMPREG(E1000_RXUDP); 5310 DUMPREG(E1000_SYSTIMR); 5311 DUMPREG(E1000_TSICR); 5312 DUMPREG(E1000_TSIM); 5313 DUMPREG(E1000_DMACR); 5314 DUMPREG(E1000_DMCTXTH); 5315 DUMPREG(E1000_DMCTLX); 5316 DUMPREG(E1000_DMCRTRH); 5317 DUMPREG(E1000_DMCCNT); 5318 DUMPREG(E1000_FCRTC); 5319 DUMPREG(E1000_PCIEMISC); 5320 DUMPREG(E1000_PCIEERRSTS); 5321 DUMPREG(E1000_IPCNFG); 5322 DUMPREG(E1000_LTRC); 5323 DUMPREG(E1000_EEER); 5324 DUMPREG(E1000_EEE_SU); 5325 DUMPREG(E1000_TLPIC); 5326 DUMPREG(E1000_RLPIC); 5327 if (++col != 1) 5328 kprintf("\n"); 5329 kprintf("\n"); 5330 } 5331 5332 static int 5333 igb_sysctl_reg_dump(SYSCTL_HANDLER_ARGS) 5334 { 5335 struct igb_softc *sc = (void *)arg1; 5336 struct ifnet *ifp = &sc->arpcom.ac_if; 5337 int error, dump = 0; 5338 5339 error = sysctl_handle_int(oidp, &dump, 0, req); 5340 if (error || req->newptr == NULL) 5341 return error; 5342 if (dump <= 0) 5343 return EINVAL; 5344 5345 ifnet_serialize_all(ifp); 5346 igb_reg_dump(sc); 5347 ifnet_deserialize_all(ifp); 5348 5349 return error; 5350 } 5351
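/*
 * Usage sketch for the register dump handler above, assuming the
 * first igb(4) instance and assuming igb_add_sysctl() registers
 * igb_sysctl_reg_dump() under a node named "reg_dump" (the actual
 * node name is defined where the sysctl tree is built):
 *
 *	sysctl dev.igb.0.reg_dump=1
 *
 * Writing any positive value calls igb_reg_dump() with the
 * interface serialized; non-positive values return EINVAL.
 */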