/*
 * Copyright (c) 2001-2013, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_igb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/igb/if_igb.h>

#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */

#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id) \
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }
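/*
 * Example expansion: IGB_DEVICE(82575EB_COPPER) yields
 *	{ IGB_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER,
 *	  "Intel(R) PRO/1000 82575EB_COPPER" },
 * i.e. the id token is pasted onto the e1000 device-id constant and
 * stringified for the probe description.
 */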
static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),
	IGB_DEVICE(I350_VF),
	IGB_DEVICE(I210_COPPER),
	IGB_DEVICE(I210_COPPER_IT),
	IGB_DEVICE(I210_COPPER_OEM1),
	IGB_DEVICE(I210_COPPER_FLASHLESS),
	IGB_DEVICE(I210_SERDES_FLASHLESS),
	IGB_DEVICE(I210_FIBER),
	IGB_DEVICE(I210_SERDES),
	IGB_DEVICE(I210_SGMII),
	IGB_DEVICE(I211_COPPER),
	IGB_DEVICE(I354_BACKPLANE_1GBPS),
	IGB_DEVICE(I354_BACKPLANE_2_5GBPS),
	IGB_DEVICE(I354_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};

static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static void	igb_add_intr_rate_sysctl(struct igb_softc *, int,
		    const char *, const char *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
static int	igb_get_rxring_inuse(const struct igb_softc *, boolean_t);
static int	igb_get_txring_inuse(const struct igb_softc *, boolean_t);
static void	igb_set_timer_cpuid(struct igb_softc *, boolean_t);

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *, boolean_t);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *, boolean_t);
static void	igb_init_dmac(struct igb_softc *, uint32_t);
static void	igb_reg_dump(struct igb_softc *);
static int	igb_sysctl_reg_dump(SYSCTL_HANDLER_ARGS);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_get_ring_max(const struct igb_softc *);
static void	igb_get_rxring_cnt(const struct igb_softc *, int *, int *);
static void	igb_get_txring_cnt(const struct igb_softc *, int *, int *);
static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);
static void	igb_setup_serialize(struct igb_softc *);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifaltq_subque *);
static void	igb_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *, int);
static void	igb_txgc(struct igb_tx_ring *);
static void	igb_txgc_timer(void *);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *, int);
static void	igb_alloc_msix(struct igb_softc *);
static void	igb_free_msix(struct igb_softc *, boolean_t);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);
static void	igb_msix_rxtx(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(struct igb_softc *);
static int	igb_enable_phy_wol(struct igb_softc *);

static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	DEVMETHOD_END
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_txr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */

static char	igb_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.txr", &igb_txr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_STR("hw.igb.flow_ctrl", igb_flowctrl, sizeof(igb_flowctrl));

/* i350 specific */
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);

static __inline void
igb_tx_intr(struct igb_tx_ring *txr, int hdr)
{

	igb_txeof(txr, hdr);
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);
}

static __inline void
igb_try_txgc(struct igb_tx_ring *txr, int16_t dec)
{

	if (txr->tx_running > 0) {
		txr->tx_running -= dec;
		if (txr->tx_running <= 0 && txr->tx_nmbuf &&
		    txr->tx_avail < txr->num_tx_desc &&
		    txr->tx_avail + txr->intr_nsegs > txr->num_tx_desc)
			igb_txgc(txr);
	}
}
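/*
 * Note on the last test above: the number of pending descriptors is
 * num_tx_desc - tx_avail, so "tx_avail + intr_nsegs > num_tx_desc"
 * holds when fewer than intr_nsegs descriptors are outstanding, i.e.
 * too few to raise the next TX interrupt; in that case this deferred
 * GC path is what eventually reclaims the remaining mbufs.
 */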
static void
igb_txgc_timer(void *xtxr)
{
	struct igb_tx_ring *txr = xtxr;
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP))
		return;

	if (!lwkt_serialize_try(&txr->tx_serialize))
		goto done;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP)) {
		lwkt_serialize_exit(&txr->tx_serialize);
		return;
	}
	igb_try_txgc(txr, IGB_TX_RUNNING_DEC);

	lwkt_serialize_exit(&txr->tx_serialize);
done:
	callout_reset(&txr->tx_gc_timer, 1, igb_txgc_timer, txr);
}

static __inline void
igb_free_txbuf(struct igb_tx_ring *txr, struct igb_tx_buf *txbuf)
{

	KKASSERT(txbuf->m_head != NULL);
	KKASSERT(txr->tx_nmbuf > 0);
	txr->tx_nmbuf--;

	bus_dmamap_unload(txr->tx_tag, txbuf->map);
	m_freem(txbuf->m_head);
	txbuf->m_head = NULL;
}

static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}
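/*
 * Rationale for the IPV4 case above (an interpretation of the descriptor
 * bits used, not wording from the original author): the RSS hash types
 * checked here have no separate UDP type, so a packet hashed as plain
 * IPV4 that carries a valid L4 checksum (TCPCS set, TCPE clear) is taken
 * to be UDP; a TCP packet would have hashed as IPV4_TCP instead.
 * Everything else returns NULL and falls back to the non-RSS input path.
 */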
static int
igb_get_ring_max(const struct igb_softc *sc)
{

	switch (sc->hw.mac.type) {
	case e1000_82575:
		return (IGB_MAX_RING_82575);

	case e1000_82576:
		return (IGB_MAX_RING_82576);

	case e1000_82580:
		return (IGB_MAX_RING_82580);

	case e1000_i350:
		return (IGB_MAX_RING_I350);

	case e1000_i354:
		return (IGB_MAX_RING_I354);

	case e1000_i210:
		return (IGB_MAX_RING_I210);

	case e1000_i211:
		return (IGB_MAX_RING_I211);

	default:
		return (IGB_MIN_RING);
	}
}

static void
igb_get_rxring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max)
{

	*ring_max = igb_get_ring_max(sc);
	*ring_cnt = device_getenv_int(sc->dev, "rxr", igb_rxr);
}

static void
igb_get_txring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max)
{

	*ring_max = igb_get_ring_max(sc);
	*ring_cnt = device_getenv_int(sc->dev, "txr", igb_txr);
}

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, ring_max, ring_cnt;
	char flowctrl[IFM_ETH_FC_STRLEN];

#ifdef notyet
	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
#endif

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    igb_media_change, igb_media_status);
	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	igb_get_rxring_cnt(sc, &ring_cnt, &ring_max);
	sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max);
	igb_get_txring_cnt(sc, &ring_cnt, &ring_max);
	sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max);
	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);

	sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
	sc->rx_ring_inuse = sc->rx_ring_cnt;
	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);
	sc->tx_ring_inuse = sc->tx_ring_cnt;

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    igb_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/* Setup serializers */
	igb_setup_serialize(sc);

	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		if (sc->hw.phy.media_type == e1000_media_type_copper) {
			if (sc->hw.mac.type == e1000_i354)
				e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
			else
				e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
		}
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address afterwards.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (sc->hw.mac.type != e1000_i210 && sc->hw.mac.type != e1000_i211 &&
	    e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again,
		 * and if it fails a second time it is a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree; must be called after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc, FALSE);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data) {
		/* XXX E1000_WUFC_MC is always cleared from E1000_WUC. */
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;
		device_printf(dev, "has WOL\n");
	}

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif
	/*
	 * Disable interrupts to prevent spurious interrupts (line-based
	 * interrupt, MSI or even MSI-X), which have been observed on
	 * several types of LOMs, from being handled.
	 */
	igb_disable_intr(sc);

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);
		igb_enable_wol(sc);

		igb_teardown_intr(sc, sc->intr_cnt);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);
	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);
	if (sc->rx_rmap != NULL)
		if_ringmap_free(sc->rx_rmap);
	if (sc->rx_rmap_intr != NULL)
		if_ringmap_free(sc->rx_rmap_intr);
	if (sc->tx_rmap != NULL)
		if_ringmap_free(sc->tx_rmap);
	if (sc->tx_rmap_intr != NULL)
		if_ringmap_free(sc->tx_rmap_intr);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);
	igb_enable_wol(sc);

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest mac address; the user may have set a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc, FALSE);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Clear bad data from Rx FIFOs */
	e1000_rx_fifo_flush_82575(&sc->hw);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configure the number of RX/TX rings to be used */
	igb_set_ring_inuse(sc, polling);
	ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc, polling);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	/* Clear counters */
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		if (sc->hw.mac.type == e1000_i354)
			e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
		else
			e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
	}

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_clr_oactive(txr->ifsq);
		ifsq_watchdog_start(&txr->tx_watchdog);

		if (!polling) {
			callout_reset_bycpu(&txr->tx_gc_timer, 1,
			    igb_txgc_timer, txr, txr->tx_intr_cpuid);
		}
	}

	igb_set_timer_cpuid(sc, polling);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
}

static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;

	case 100:
		/*
		 * Support for 100Mb SFP - these are Fiber
		 * but the media type appears as serdes
		 */
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_100_FX;
		else
			ifmr->ifm_active |= IFM_100_TX;
		break;

	case 1000:
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_1000_SX;
		else
			ifmr->ifm_active |= IFM_1000_T;
		break;

	case 2500:
		ifmr->ifm_active |= IFM_2500_SX;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}

static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		igb_init(sc);

	return 0;
}

static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	int mcnt = 0;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	if (ifp->if_flags & IFF_ALLMULTI) {
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	} else {
		struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
	}
	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* Fall thru */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			/* Get the flow control for display */
			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));

			if_printf(ifp, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex",
			    flowctrl);
		}
		if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
			e1000_force_flowctrl(hw, sc->ifm_flowctrl);
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* Delay Link Up for Phy update */
		if ((hw->mac.type == e1000_i210 ||
		     hw->mac.type == e1000_i211) &&
		    hw->phy.id == I210_I_PHY_ID)
			msec_delay(IGB_I210_LINK_DELAY);
		/*
		 * Reset if the media type changed.
		 * Support AutoMediaDetect for Marvell M88 PHY in i354.
		 */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = FALSE;
			igb_reset(sc, TRUE);
		}
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}
static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_clr_oactive(txr->ifsq);
		ifsq_watchdog_stop(&txr->tx_watchdog);
		txr->tx_flags &= ~IGB_TXFLAG_ENABLED;

		txr->tx_running = 0;
		callout_stop(&txr->tx_gc_timer);
	}

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}

static void
igb_reset(struct igb_softc *sc, boolean_t media_reset)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;

	case e1000_i210:
	case e1000_i211:
		pba = E1000_PBA_34K;
		break;

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high
	 *   water mark.  This allows the receiver to restart by sending
	 *   XON when it has drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));
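	/*
	 * Worked example of the arithmetic above (illustrative numbers
	 * only): with pba = 34 (KB, the i210/i211 case) and a standard
	 * 1518 byte max_frame_size, pba << 10 = 34816 bytes, so
	 * hwm = min(34816 * 9 / 10, 34816 - 2 * 1518)
	 *     = min(31334, 31780) = 31334.
	 */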
	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
	fc->requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Reset for AutoMediaDetect */
	if (media_reset) {
		e1000_setup_init_funcs(hw, TRUE);
		e1000_get_bus_info(hw);
	}

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	igb_init_dmac(sc, pba);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif

	ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_rings[0].num_rx_desc;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_modulo;
	ifq_set_subq_divisor(&ifp->if_snd, 1);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog, 0);
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
}
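/*
 * Design note on the subqueue setup in igb_setup_ifp() above:
 * ifq_mapsubq_modulo spreads outbound packets across the transmit
 * subqueues modulo the divisor.  The divisor starts out as 1 here and
 * is raised to sc->tx_ring_inuse by igb_init(), once the number of TX
 * rings actually in use is known.
 */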
static void
igb_add_sysctl(struct igb_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char node[32];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

#define IGB_ADD_INTR_RATE_SYSCTL(sc, use, name) \
do { \
	igb_add_intr_rate_sysctl(sc, IGB_INTR_USE_##use, #name "_intr_rate", \
	    #use " interrupt rate"); \
} while (0)

	IGB_ADD_INTR_RATE_SYSCTL(sc, RXTX, rxtx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, RX, rx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, TX, tx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, STATUS, sts);

#undef IGB_ADD_INTR_RATE_SYSCTL

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_wreg_nsegs, "I",
	    "# of segments sent before write to hardware register");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_rx_wreg_nsegs, "I",
	    "# of segments received before write to hardware register");

	if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
		    OID_AUTO, "tx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->tx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I",
		    "TX MSI-X CPU map");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
		    OID_AUTO, "rx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->rx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I",
		    "RX MSI-X CPU map");
	}
#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "TX polling CPU map");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "RX polling CPU map");
#endif

#ifdef IGB_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
	    "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx,
		    SYSCTL_CHILDREN(tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets");
	}
#endif
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

#ifdef IGB_TSS_DEBUG
		ksnprintf(node, sizeof(node), "tx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node,
		    CTLFLAG_RW, &txr->tx_packets, "TXed packets");
#endif
		ksnprintf(node, sizeof(node), "tx%d_nmbuf", i);
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node,
		    CTLFLAG_RD, &txr->tx_nmbuf, 0, "# of pending TX mbufs");

		ksnprintf(node, sizeof(node), "tx%d_gc", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node,
		    CTLFLAG_RW, &txr->tx_gc, "# of TX desc GC");
	}

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "dumpreg", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_reg_dump, "I", "dump registers");
}
static int
igb_alloc_rings(struct igb_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &sc->parent_tag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate TX descriptor rings and buffers
	 */
	sc->tx_rings = kmalloc(sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
	    M_DEVBUF,
	    M_WAITOK | M_ZERO | M_CACHEALIGN);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		txr->sc = sc;
		txr->me = i;
		txr->tx_intr_cpuid = -1;
		lwkt_serialize_init(&txr->tx_serialize);
		callout_init_mp(&txr->tx_gc_timer);

		error = igb_create_tx_ring(txr);
		if (error)
			return error;
	}

	/*
	 * Allocate RX descriptor rings and buffers
	 */
	sc->rx_rings = kmalloc(sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
	    M_DEVBUF,
	    M_WAITOK | M_ZERO | M_CACHEALIGN);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		rxr->sc = sc;
		rxr->me = i;
		lwkt_serialize_init(&rxr->rx_serialize);

		error = igb_create_rx_ring(rxr);
		if (error)
			return error;
	}

	return 0;
}

static void
igb_free_rings(struct igb_softc *sc)
{
	int i;

	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			igb_destroy_tx_ring(txr, txr->num_tx_desc);
		}
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
		}
		kfree(sc->rx_rings, M_DEVBUF);
	}
}
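/*
 * A note on the descriptor-count check at the top of the next function,
 * assuming IGB_DBA_ALIGN is 128 as defined in if_igb.h: each legacy
 * e1000_tx_desc is 16 bytes, so (ntxd * 16) % 128 == 0 effectively
 * requires the ring size to be a multiple of 8 descriptors.
 */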
static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i, ntxd;

	/*
	 * Validate number of transmit descriptors.  It must not exceed
	 * the hardware maximum, and must be a multiple of IGB_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 ||
	    ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, ntxd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = ntxd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc(tsize, M_DEVBUF,
	    M_WAITOK | M_ZERO | M_CACHEALIGN);
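	/*
	 * Background on the buffer allocated below (a hedged summary of
	 * how this driver appears to use it): instead of polling the DD
	 * bit in each descriptor, the NIC can be programmed to write the
	 * index of the last consumed TX descriptor into this
	 * cacheline-aligned word, and igb_txeof() reads it to learn how
	 * far transmission has progressed.
	 */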
kfree(txr->tx_buf, M_DEVBUF); 1921 txr->tx_buf = NULL; 1922 return error; 1923 } 1924 1925 /* 1926 * Create DMA maps for TX buffers 1927 */ 1928 for (i = 0; i < txr->num_tx_desc; ++i) { 1929 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1930 1931 error = bus_dmamap_create(txr->tx_tag, 1932 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map); 1933 if (error) { 1934 device_printf(txr->sc->dev, 1935 "Unable to create TX DMA map\n"); 1936 igb_destroy_tx_ring(txr, i); 1937 return error; 1938 } 1939 } 1940 1941 if (txr->sc->hw.mac.type == e1000_82575) 1942 txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0; 1943 1944 /* 1945 * Initialize various watermarks 1946 */ 1947 if (txr->sc->hw.mac.type == e1000_82575) { 1948 /* 1949 * There is no way to GC pending TX mbufs in 'header 1950 * write back' mode with reduced # of RS TX descs, 1951 * since TDH does _not_ move for 82575. 1952 */ 1953 txr->intr_nsegs = 1; 1954 } else { 1955 txr->intr_nsegs = txr->num_tx_desc / 16; 1956 } 1957 txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS; 1958 1959 return 0; 1960 } 1961 1962 static void 1963 igb_free_tx_ring(struct igb_tx_ring *txr) 1964 { 1965 int i; 1966 1967 for (i = 0; i < txr->num_tx_desc; ++i) { 1968 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1969 1970 if (txbuf->m_head != NULL) 1971 igb_free_txbuf(txr, txbuf); 1972 } 1973 } 1974 1975 static void 1976 igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc) 1977 { 1978 int i; 1979 1980 if (txr->txdma.dma_vaddr != NULL) { 1981 bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map); 1982 bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr, 1983 txr->txdma.dma_map); 1984 bus_dma_tag_destroy(txr->txdma.dma_tag); 1985 txr->txdma.dma_vaddr = NULL; 1986 } 1987 1988 if (txr->tx_hdr != NULL) { 1989 bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap); 1990 bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr, 1991 txr->tx_hdr_dmap); 1992 bus_dma_tag_destroy(txr->tx_hdr_dtag); 1993 txr->tx_hdr = NULL; 1994 } 1995 1996 if (txr->tx_buf == NULL) 1997 return; 1998 1999 for (i = 0; i < ndesc; ++i) { 2000 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 2001 2002 KKASSERT(txbuf->m_head == NULL); 2003 bus_dmamap_destroy(txr->tx_tag, txbuf->map); 2004 } 2005 bus_dma_tag_destroy(txr->tx_tag); 2006 2007 kfree(txr->tx_buf, M_DEVBUF); 2008 txr->tx_buf = NULL; 2009 } 2010 2011 static void 2012 igb_init_tx_ring(struct igb_tx_ring *txr) 2013 { 2014 /* Clear the old descriptor contents */ 2015 bzero(txr->tx_base, 2016 sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc); 2017 2018 /* Clear TX head write-back buffer */ 2019 *(txr->tx_hdr) = 0; 2020 2021 /* Reset indices */ 2022 txr->next_avail_desc = 0; 2023 txr->next_to_clean = 0; 2024 txr->tx_nsegs = 0; 2025 txr->tx_running = 0; 2026 txr->tx_nmbuf = 0; 2027 2028 /* Set number of descriptors available */ 2029 txr->tx_avail = txr->num_tx_desc; 2030 2031 /* Enable this TX ring */ 2032 txr->tx_flags |= IGB_TXFLAG_ENABLED; 2033 } 2034 2035 static void 2036 igb_init_tx_unit(struct igb_softc *sc) 2037 { 2038 struct e1000_hw *hw = &sc->hw; 2039 uint32_t tctl; 2040 int i; 2041 2042 /* Setup the Tx Descriptor Rings */ 2043 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2044 struct igb_tx_ring *txr = &sc->tx_rings[i]; 2045 uint64_t bus_addr = txr->txdma.dma_paddr; 2046 uint64_t hdr_paddr = txr->tx_hdr_paddr; 2047 uint32_t txdctl = 0; 2048 uint32_t dca_txctrl; 2049 2050 E1000_WRITE_REG(hw, E1000_TDLEN(i), 2051 txr->num_tx_desc * sizeof(struct e1000_tx_desc)); 2052 E1000_WRITE_REG(hw, E1000_TDBAH(i), 2053 (uint32_t)(bus_addr >> 32)); 2054 E1000_WRITE_REG(hw, E1000_TDBAL(i), 2055
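/* TDBAL takes the low 32 bits of the ring base; the high 32 bits went into TDBAH above. */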
(uint32_t)bus_addr); 2056 2057 /* Setup the HW Tx Head and Tail descriptor pointers */ 2058 E1000_WRITE_REG(hw, E1000_TDT(i), 0); 2059 E1000_WRITE_REG(hw, E1000_TDH(i), 0); 2060 2061 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); 2062 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 2063 E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl); 2064 2065 /* 2066 * Don't set WB_on_EITR: 2067 * - 82575 does not have it 2068 * - It almost has no effect on 82576, see: 2069 * 82576 specification update errata #26 2070 * - It causes unnecessary bus traffic 2071 */ 2072 E1000_WRITE_REG(hw, E1000_TDWBAH(i), 2073 (uint32_t)(hdr_paddr >> 32)); 2074 E1000_WRITE_REG(hw, E1000_TDWBAL(i), 2075 ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE); 2076 2077 /* 2078 * WTHRESH is ignored by the hardware, since header 2079 * write back mode is used. 2080 */ 2081 txdctl |= IGB_TX_PTHRESH; 2082 txdctl |= IGB_TX_HTHRESH << 8; 2083 txdctl |= IGB_TX_WTHRESH << 16; 2084 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 2085 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); 2086 } 2087 2088 if (sc->vf_ifp) 2089 return; 2090 2091 e1000_config_collision_dist(hw); 2092 2093 /* Program the Transmit Control Register */ 2094 tctl = E1000_READ_REG(hw, E1000_TCTL); 2095 tctl &= ~E1000_TCTL_CT; 2096 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2097 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); 2098 2099 /* This write will effectively turn on the transmit unit. */ 2100 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 2101 } 2102 2103 static boolean_t 2104 igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp) 2105 { 2106 struct e1000_adv_tx_context_desc *TXD; 2107 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 2108 int ehdrlen, ctxd, ip_hlen = 0; 2109 boolean_t offload = TRUE; 2110 2111 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0) 2112 offload = FALSE; 2113 2114 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 2115 2116 ctxd = txr->next_avail_desc; 2117 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 2118 2119 /* 2120 * In advanced descriptors the vlan tag must 2121 * be placed into the context descriptor, thus 2122 * we need to be here just for that setup. 2123 */ 2124 if (mp->m_flags & M_VLANTAG) { 2125 uint16_t vlantag; 2126 2127 vlantag = htole16(mp->m_pkthdr.ether_vlantag); 2128 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 2129 } else if (!offload) { 2130 return FALSE; 2131 } 2132 2133 ehdrlen = mp->m_pkthdr.csum_lhlen; 2134 KASSERT(ehdrlen > 0, ("invalid ether hlen")); 2135 2136 /* Set the ether header length */ 2137 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; 2138 if (mp->m_pkthdr.csum_flags & CSUM_IP) { 2139 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 2140 ip_hlen = mp->m_pkthdr.csum_iphlen; 2141 KASSERT(ip_hlen > 0, ("invalid ip hlen")); 2142 } 2143 vlan_macip_lens |= ip_hlen; 2144 2145 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 2146 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 2147 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 2148 else if (mp->m_pkthdr.csum_flags & CSUM_UDP) 2149 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; 2150 2151 /* 2152 * 82575 needs the TX context index added; the queue 2153 * index is used as TX context index here. 
2154 */ 2155 if (txr->sc->hw.mac.type == e1000_82575) 2156 mss_l4len_idx = txr->me << 4; 2157 2158 /* Now copy bits into descriptor */ 2159 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 2160 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 2161 TXD->seqnum_seed = htole32(0); 2162 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 2163 2164 /* We've consumed the first desc, adjust counters */ 2165 if (++ctxd == txr->num_tx_desc) 2166 ctxd = 0; 2167 txr->next_avail_desc = ctxd; 2168 --txr->tx_avail; 2169 2170 return offload; 2171 } 2172 2173 static void 2174 igb_txeof(struct igb_tx_ring *txr, int hdr) 2175 { 2176 int first, avail; 2177 2178 if (txr->tx_avail == txr->num_tx_desc) 2179 return; 2180 2181 first = txr->next_to_clean; 2182 if (first == hdr) 2183 return; 2184 2185 avail = txr->tx_avail; 2186 while (first != hdr) { 2187 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; 2188 2189 KKASSERT(avail < txr->num_tx_desc); 2190 ++avail; 2191 2192 if (txbuf->m_head) 2193 igb_free_txbuf(txr, txbuf); 2194 2195 if (++first == txr->num_tx_desc) 2196 first = 0; 2197 } 2198 txr->next_to_clean = first; 2199 txr->tx_avail = avail; 2200 2201 /* 2202 * If we have a minimum free, clear OACTIVE 2203 * to tell the stack that it is OK to send packets. 2204 */ 2205 if (txr->tx_avail > IGB_MAX_SCATTER + IGB_TX_RESERVED) { 2206 ifsq_clr_oactive(txr->ifsq); 2207 2208 /* 2209 * We have enough TX descriptors, turn off 2210 * the watchdog. We allow a small number of 2211 * packets (roughly intr_nsegs) pending on 2212 * the transmit ring. 2213 */ 2214 ifsq_watchdog_set_count(&txr->tx_watchdog, 0); 2215 } 2216 txr->tx_running = IGB_TX_RUNNING; 2217 } 2218 2219 static void 2220 igb_txgc(struct igb_tx_ring *txr) 2221 { 2222 int first, hdr; 2223 #ifdef INVARIANTS 2224 int avail; 2225 #endif 2226 2227 if (txr->tx_avail == txr->num_tx_desc) 2228 return; 2229 2230 hdr = E1000_READ_REG(&txr->sc->hw, E1000_TDH(txr->me)); 2231 first = txr->next_to_clean; 2232 if (first == hdr) 2233 goto done; 2234 txr->tx_gc++; 2235 2236 #ifdef INVARIANTS 2237 avail = txr->tx_avail; 2238 #endif 2239 while (first != hdr) { 2240 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; 2241 2242 #ifdef INVARIANTS 2243 KKASSERT(avail < txr->num_tx_desc); 2244 ++avail; 2245 #endif 2246 if (txbuf->m_head) 2247 igb_free_txbuf(txr, txbuf); 2248 2249 if (++first == txr->num_tx_desc) 2250 first = 0; 2251 } 2252 done: 2253 if (txr->tx_nmbuf) 2254 txr->tx_running = IGB_TX_RUNNING; 2255 } 2256 2257 static int 2258 igb_create_rx_ring(struct igb_rx_ring *rxr) 2259 { 2260 int rsize, i, error, nrxd; 2261 2262 /* 2263 * Validate number of receive descriptors. It must not exceed 2264 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
2265 */ 2266 nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd); 2267 if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 || 2268 nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) { 2269 device_printf(rxr->sc->dev, 2270 "Using %d RX descriptors instead of %d!\n", 2271 IGB_DEFAULT_RXD, nrxd); 2272 rxr->num_rx_desc = IGB_DEFAULT_RXD; 2273 } else { 2274 rxr->num_rx_desc = nrxd; 2275 } 2276 2277 /* 2278 * Allocate RX descriptor ring 2279 */ 2280 rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc), 2281 IGB_DBA_ALIGN); 2282 rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag, 2283 IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2284 &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map, 2285 &rxr->rxdma.dma_paddr); 2286 if (rxr->rxdma.dma_vaddr == NULL) { 2287 device_printf(rxr->sc->dev, 2288 "Unable to allocate RxDescriptor memory\n"); 2289 return ENOMEM; 2290 } 2291 rxr->rx_base = rxr->rxdma.dma_vaddr; 2292 bzero(rxr->rx_base, rsize); 2293 2294 rsize = __VM_CACHELINE_ALIGN( 2295 sizeof(struct igb_rx_buf) * rxr->num_rx_desc); 2296 rxr->rx_buf = kmalloc(rsize, M_DEVBUF, 2297 M_WAITOK | M_ZERO | M_CACHEALIGN); 2298 2299 /* 2300 * Create DMA tag for RX buffers 2301 */ 2302 error = bus_dma_tag_create(rxr->sc->parent_tag, 2303 1, 0, /* alignment, bounds */ 2304 BUS_SPACE_MAXADDR, /* lowaddr */ 2305 BUS_SPACE_MAXADDR, /* highaddr */ 2306 MCLBYTES, /* maxsize */ 2307 1, /* nsegments */ 2308 MCLBYTES, /* maxsegsize */ 2309 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2310 &rxr->rx_tag); 2311 if (error) { 2312 device_printf(rxr->sc->dev, 2313 "Unable to create RX payload DMA tag\n"); 2314 kfree(rxr->rx_buf, M_DEVBUF); 2315 rxr->rx_buf = NULL; 2316 return error; 2317 } 2318 2319 /* 2320 * Create spare DMA map for RX buffers 2321 */ 2322 error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK, 2323 &rxr->rx_sparemap); 2324 if (error) { 2325 device_printf(rxr->sc->dev, 2326 "Unable to create spare RX DMA maps\n"); 2327 bus_dma_tag_destroy(rxr->rx_tag); 2328 kfree(rxr->rx_buf, M_DEVBUF); 2329 rxr->rx_buf = NULL; 2330 return error; 2331 } 2332 2333 /* 2334 * Create DMA maps for RX buffers 2335 */ 2336 for (i = 0; i < rxr->num_rx_desc; i++) { 2337 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2338 2339 error = bus_dmamap_create(rxr->rx_tag, 2340 BUS_DMA_WAITOK, &rxbuf->map); 2341 if (error) { 2342 device_printf(rxr->sc->dev, 2343 "Unable to create RX DMA maps\n"); 2344 igb_destroy_rx_ring(rxr, i); 2345 return error; 2346 } 2347 } 2348 2349 /* 2350 * Initialize various watermark 2351 */ 2352 rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS; 2353 2354 return 0; 2355 } 2356 2357 static void 2358 igb_free_rx_ring(struct igb_rx_ring *rxr) 2359 { 2360 int i; 2361 2362 for (i = 0; i < rxr->num_rx_desc; ++i) { 2363 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2364 2365 if (rxbuf->m_head != NULL) { 2366 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2367 m_freem(rxbuf->m_head); 2368 rxbuf->m_head = NULL; 2369 } 2370 } 2371 2372 if (rxr->fmp != NULL) 2373 m_freem(rxr->fmp); 2374 rxr->fmp = NULL; 2375 rxr->lmp = NULL; 2376 } 2377 2378 static void 2379 igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc) 2380 { 2381 int i; 2382 2383 if (rxr->rxdma.dma_vaddr != NULL) { 2384 bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map); 2385 bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr, 2386 rxr->rxdma.dma_map); 2387 bus_dma_tag_destroy(rxr->rxdma.dma_tag); 2388 rxr->rxdma.dma_vaddr = NULL; 2389 } 2390 2391 if (rxr->rx_buf == NULL) 2392 return; 2393 2394 for (i = 0; i < ndesc; ++i) { 2395 struct igb_rx_buf 
*rxbuf = &rxr->rx_buf[i]; 2396 2397 KKASSERT(rxbuf->m_head == NULL); 2398 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map); 2399 } 2400 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap); 2401 bus_dma_tag_destroy(rxr->rx_tag); 2402 2403 kfree(rxr->rx_buf, M_DEVBUF); 2404 rxr->rx_buf = NULL; 2405 } 2406 2407 static void 2408 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf) 2409 { 2410 rxd->read.pkt_addr = htole64(rxbuf->paddr); 2411 rxd->wb.upper.status_error = 0; 2412 } 2413 2414 static int 2415 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait) 2416 { 2417 struct mbuf *m; 2418 bus_dma_segment_t seg; 2419 bus_dmamap_t map; 2420 struct igb_rx_buf *rxbuf; 2421 int error, nseg; 2422 2423 m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2424 if (m == NULL) { 2425 if (wait) { 2426 if_printf(&rxr->sc->arpcom.ac_if, 2427 "Unable to allocate RX mbuf\n"); 2428 } 2429 return ENOBUFS; 2430 } 2431 m->m_len = m->m_pkthdr.len = MCLBYTES; 2432 2433 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2434 m_adj(m, ETHER_ALIGN); 2435 2436 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2437 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2438 if (error) { 2439 m_freem(m); 2440 if (wait) { 2441 if_printf(&rxr->sc->arpcom.ac_if, 2442 "Unable to load RX mbuf\n"); 2443 } 2444 return error; 2445 } 2446 2447 rxbuf = &rxr->rx_buf[i]; 2448 if (rxbuf->m_head != NULL) 2449 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2450 2451 map = rxbuf->map; 2452 rxbuf->map = rxr->rx_sparemap; 2453 rxr->rx_sparemap = map; 2454 2455 rxbuf->m_head = m; 2456 rxbuf->paddr = seg.ds_addr; 2457 2458 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2459 return 0; 2460 } 2461 2462 static int 2463 igb_init_rx_ring(struct igb_rx_ring *rxr) 2464 { 2465 int i; 2466 2467 /* Clear the ring contents */ 2468 bzero(rxr->rx_base, 2469 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2470 2471 /* Now replenish the ring mbufs */ 2472 for (i = 0; i < rxr->num_rx_desc; ++i) { 2473 int error; 2474 2475 error = igb_newbuf(rxr, i, TRUE); 2476 if (error) 2477 return error; 2478 } 2479 2480 /* Setup our descriptor indices */ 2481 rxr->next_to_check = 0; 2482 2483 rxr->fmp = NULL; 2484 rxr->lmp = NULL; 2485 rxr->discard = FALSE; 2486 2487 return 0; 2488 } 2489 2490 static void 2491 igb_init_rx_unit(struct igb_softc *sc, boolean_t polling) 2492 { 2493 struct ifnet *ifp = &sc->arpcom.ac_if; 2494 struct e1000_hw *hw = &sc->hw; 2495 uint32_t rctl, rxcsum, srrctl = 0; 2496 int i; 2497 2498 /* 2499 * Make sure receives are disabled while setting 2500 * up the descriptor ring 2501 */ 2502 rctl = E1000_READ_REG(hw, E1000_RCTL); 2503 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2504 2505 #if 0 2506 /* 2507 ** Set up for header split 2508 */ 2509 if (igb_header_split) { 2510 /* Use a standard mbuf for the header */ 2511 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2512 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2513 } else 2514 #endif 2515 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2516 2517 /* 2518 ** Set up for jumbo frames 2519 */ 2520 if (ifp->if_mtu > ETHERMTU) { 2521 rctl |= E1000_RCTL_LPE; 2522 #if 0 2523 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { 2524 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2525 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; 2526 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { 2527 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2528 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; 2529 } 2530 /* Set maximum packet len */ 2531 psize = adapter->max_frame_size; 
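/* For reference: the SRRCTL packet buffer size used throughout this function is encoded in 1KB units, i.e. size >> E1000_SRRCTL_BSIZEPKT_SHIFT, so (assuming the usual shift of 10) a 2048-byte cluster is programmed as 2 and a 4096-byte MJUMPAGESIZE buffer as 4. */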
/* are we on a vlan? */ 2533 if (adapter->ifp->if_vlantrunk != NULL) 2534 psize += VLAN_TAG_SIZE; 2535 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); 2536 #else 2537 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2538 rctl |= E1000_RCTL_SZ_2048; 2539 #endif 2540 } else { 2541 rctl &= ~E1000_RCTL_LPE; 2542 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2543 rctl |= E1000_RCTL_SZ_2048; 2544 } 2545 2546 /* 2547 * If TX flow control is disabled and more than 1 RX ring 2548 * is enabled, enable DROP. 2549 * 2550 * This drops frames rather than hanging the RX MAC for all 2551 * RX rings. 2552 */ 2553 if (sc->rx_ring_inuse > 1 && 2554 (sc->ifm_flowctrl & IFM_ETH_TXPAUSE) == 0) { 2555 srrctl |= E1000_SRRCTL_DROP_EN; 2556 if (bootverbose) 2557 if_printf(ifp, "enable RX drop\n"); 2558 } 2559 2560 /* Setup the Base and Length of the Rx Descriptor Rings */ 2561 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2562 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2563 uint64_t bus_addr = rxr->rxdma.dma_paddr; 2564 uint32_t rxdctl; 2565 2566 E1000_WRITE_REG(hw, E1000_RDLEN(i), 2567 rxr->num_rx_desc * sizeof(struct e1000_rx_desc)); 2568 E1000_WRITE_REG(hw, E1000_RDBAH(i), 2569 (uint32_t)(bus_addr >> 32)); 2570 E1000_WRITE_REG(hw, E1000_RDBAL(i), 2571 (uint32_t)bus_addr); 2572 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); 2573 /* Enable this Queue */ 2574 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); 2575 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 2576 rxdctl &= 0xFFF00000; 2577 rxdctl |= IGB_RX_PTHRESH; 2578 rxdctl |= IGB_RX_HTHRESH << 8; 2579 /* 2580 * Don't set WTHRESH to a value above 1 on 82576, see: 2581 * 82576 specification update errata #26 2582 */ 2583 rxdctl |= IGB_RX_WTHRESH << 16; 2584 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); 2585 } 2586 2587 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2588 rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE); 2589 2590 /* 2591 * Receive Checksum Offload for TCP and UDP 2592 * 2593 * Checksum offloading is also enabled if multiple receive 2594 * queues are to be supported, since we need it to figure out 2595 * fragments. 2596 */ 2597 if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) { 2598 /* 2599 * NOTE: 2600 * PCSD must be enabled to enable multiple 2601 * receive queues. 2602 */ 2603 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2604 E1000_RXCSUM_PCSD; 2605 } else { 2606 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2607 E1000_RXCSUM_PCSD); 2608 } 2609 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2610 2611 if (sc->rx_ring_inuse > 1) { 2612 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE]; 2613 const struct if_ringmap *rm; 2614 uint32_t reta_shift; 2615 int j, r; 2616 2617 /* 2618 * NOTE: 2619 * When we reach here, RSS has already been disabled 2620 * in igb_stop(), so we can safely configure RSS key 2621 * and redirect table.
2622 */ 2623 2624 /* 2625 * Configure RSS key 2626 */ 2627 toeplitz_get_key(key, sizeof(key)); 2628 for (i = 0; i < IGB_NRSSRK; ++i) { 2629 uint32_t rssrk; 2630 2631 rssrk = IGB_RSSRK_VAL(key, i); 2632 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2633 2634 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk); 2635 } 2636 2637 /* 2638 * Configure RSS redirect table 2639 */ 2640 if (polling) 2641 rm = sc->rx_rmap; 2642 else 2643 rm = sc->rx_rmap_intr; 2644 if_ringmap_rdrtable(rm, sc->rdr_table, IGB_RDRTABLE_SIZE); 2645 2646 reta_shift = IGB_RETA_SHIFT; 2647 if (hw->mac.type == e1000_82575) 2648 reta_shift = IGB_RETA_SHIFT_82575; 2649 2650 r = 0; 2651 for (j = 0; j < IGB_NRETA; ++j) { 2652 uint32_t reta = 0; 2653 2654 for (i = 0; i < IGB_RETA_SIZE; ++i) { 2655 uint32_t q; 2656 2657 q = sc->rdr_table[r] << reta_shift; 2658 reta |= q << (8 * i); 2659 ++r; 2660 } 2661 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2662 E1000_WRITE_REG(hw, E1000_RETA(j), reta); 2663 } 2664 2665 /* 2666 * Enable multiple receive queues. 2667 * Enable IPv4 RSS standard hash functions. 2668 * Disable RSS interrupt on 82575 2669 */ 2670 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2671 E1000_MRQC_ENABLE_RSS_4Q | 2672 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2673 E1000_MRQC_RSS_FIELD_IPV4); 2674 } 2675 2676 /* Setup the Receive Control Register */ 2677 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2678 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2679 E1000_RCTL_RDMTS_HALF | 2680 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2681 /* Strip CRC bytes. */ 2682 rctl |= E1000_RCTL_SECRC; 2683 /* Make sure VLAN Filters are off */ 2684 rctl &= ~E1000_RCTL_VFE; 2685 /* Don't store bad packets */ 2686 rctl &= ~E1000_RCTL_SBP; 2687 2688 /* Enable Receives */ 2689 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2690 2691 /* 2692 * Setup the HW Rx Head and Tail Descriptor Pointers 2693 * - needs to be after enable 2694 */ 2695 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2696 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2697 2698 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2699 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1); 2700 } 2701 } 2702 2703 static void 2704 igb_rx_refresh(struct igb_rx_ring *rxr, int i) 2705 { 2706 if (--i < 0) 2707 i = rxr->num_rx_desc - 1; 2708 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2709 } 2710 2711 static void 2712 igb_rxeof(struct igb_rx_ring *rxr, int count) 2713 { 2714 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2715 union e1000_adv_rx_desc *cur; 2716 uint32_t staterr; 2717 int i, ncoll = 0, cpuid = mycpuid; 2718 2719 i = rxr->next_to_check; 2720 cur = &rxr->rx_base[i]; 2721 staterr = le32toh(cur->wb.upper.status_error); 2722 2723 if ((staterr & E1000_RXD_STAT_DD) == 0) 2724 return; 2725 2726 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2727 struct pktinfo *pi = NULL, pi0; 2728 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2729 struct mbuf *m = NULL; 2730 boolean_t eop; 2731 2732 eop = (staterr & E1000_RXD_STAT_EOP) ? 
TRUE : FALSE; 2733 if (eop) 2734 --count; 2735 2736 ++ncoll; 2737 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2738 !rxr->discard) { 2739 struct mbuf *mp = rxbuf->m_head; 2740 uint32_t hash, hashtype; 2741 uint16_t vlan; 2742 int len; 2743 2744 len = le16toh(cur->wb.upper.length); 2745 if ((rxr->sc->hw.mac.type == e1000_i350 || 2746 rxr->sc->hw.mac.type == e1000_i354) && 2747 (staterr & E1000_RXDEXT_STATERR_LB)) 2748 vlan = be16toh(cur->wb.upper.vlan); 2749 else 2750 vlan = le16toh(cur->wb.upper.vlan); 2751 2752 hash = le32toh(cur->wb.lower.hi_dword.rss); 2753 hashtype = le32toh(cur->wb.lower.lo_dword.data) & 2754 E1000_RXDADV_RSSTYPE_MASK; 2755 2756 IGB_RSS_DPRINTF(rxr->sc, 10, 2757 "ring%d, hash 0x%08x, hashtype %u\n", 2758 rxr->me, hash, hashtype); 2759 2760 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2761 BUS_DMASYNC_POSTREAD); 2762 2763 if (igb_newbuf(rxr, i, FALSE) != 0) { 2764 IFNET_STAT_INC(ifp, iqdrops, 1); 2765 goto discard; 2766 } 2767 2768 mp->m_len = len; 2769 if (rxr->fmp == NULL) { 2770 mp->m_pkthdr.len = len; 2771 rxr->fmp = mp; 2772 rxr->lmp = mp; 2773 } else { 2774 rxr->lmp->m_next = mp; 2775 rxr->lmp = rxr->lmp->m_next; 2776 rxr->fmp->m_pkthdr.len += len; 2777 } 2778 2779 if (eop) { 2780 m = rxr->fmp; 2781 rxr->fmp = NULL; 2782 rxr->lmp = NULL; 2783 2784 m->m_pkthdr.rcvif = ifp; 2785 IFNET_STAT_INC(ifp, ipackets, 1); 2786 2787 if (ifp->if_capenable & IFCAP_RXCSUM) 2788 igb_rxcsum(staterr, m); 2789 2790 if (staterr & E1000_RXD_STAT_VP) { 2791 m->m_pkthdr.ether_vlantag = vlan; 2792 m->m_flags |= M_VLANTAG; 2793 } 2794 2795 if (ifp->if_capenable & IFCAP_RSS) { 2796 pi = igb_rssinfo(m, &pi0, 2797 hash, hashtype, staterr); 2798 } 2799 #ifdef IGB_RSS_DEBUG 2800 rxr->rx_packets++; 2801 #endif 2802 } 2803 } else { 2804 IFNET_STAT_INC(ifp, ierrors, 1); 2805 discard: 2806 igb_setup_rxdesc(cur, rxbuf); 2807 if (!eop) 2808 rxr->discard = TRUE; 2809 else 2810 rxr->discard = FALSE; 2811 if (rxr->fmp != NULL) { 2812 m_freem(rxr->fmp); 2813 rxr->fmp = NULL; 2814 rxr->lmp = NULL; 2815 } 2816 m = NULL; 2817 } 2818 2819 if (m != NULL) 2820 ifp->if_input(ifp, m, pi, cpuid); 2821 2822 /* Advance our pointers to the next descriptor. 
*/ 2823 if (++i == rxr->num_rx_desc) 2824 i = 0; 2825 2826 if (ncoll >= rxr->wreg_nsegs) { 2827 igb_rx_refresh(rxr, i); 2828 ncoll = 0; 2829 } 2830 2831 cur = &rxr->rx_base[i]; 2832 staterr = le32toh(cur->wb.upper.status_error); 2833 } 2834 rxr->next_to_check = i; 2835 2836 if (ncoll > 0) 2837 igb_rx_refresh(rxr, i); 2838 } 2839 2840 2841 static void 2842 igb_set_vlan(struct igb_softc *sc) 2843 { 2844 struct e1000_hw *hw = &sc->hw; 2845 uint32_t reg; 2846 #if 0 2847 struct ifnet *ifp = sc->arpcom.ac_if; 2848 #endif 2849 2850 if (sc->vf_ifp) { 2851 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2852 return; 2853 } 2854 2855 reg = E1000_READ_REG(hw, E1000_CTRL); 2856 reg |= E1000_CTRL_VME; 2857 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2858 2859 #if 0 2860 /* Enable the Filter Table */ 2861 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2862 reg = E1000_READ_REG(hw, E1000_RCTL); 2863 reg &= ~E1000_RCTL_CFIEN; 2864 reg |= E1000_RCTL_VFE; 2865 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2866 } 2867 #endif 2868 2869 /* Update the frame size */ 2870 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2871 sc->max_frame_size + VLAN_TAG_SIZE); 2872 2873 #if 0 2874 /* Don't bother with table if no vlans */ 2875 if ((adapter->num_vlans == 0) || 2876 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2877 return; 2878 /* 2879 ** A soft reset zero's out the VFTA, so 2880 ** we need to repopulate it now. 2881 */ 2882 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2883 if (adapter->shadow_vfta[i] != 0) { 2884 if (adapter->vf_ifp) 2885 e1000_vfta_set_vf(hw, 2886 adapter->shadow_vfta[i], TRUE); 2887 else 2888 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2889 i, adapter->shadow_vfta[i]); 2890 } 2891 #endif 2892 } 2893 2894 static void 2895 igb_enable_intr(struct igb_softc *sc) 2896 { 2897 int i; 2898 2899 for (i = 0; i < sc->intr_cnt; ++i) 2900 lwkt_serialize_handler_enable(sc->intr_data[i].intr_serialize); 2901 2902 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2903 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 2904 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask); 2905 else 2906 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2907 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2908 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2909 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2910 } else { 2911 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2912 } 2913 E1000_WRITE_FLUSH(&sc->hw); 2914 } 2915 2916 static void 2917 igb_disable_intr(struct igb_softc *sc) 2918 { 2919 int i; 2920 2921 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2922 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2923 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2924 } 2925 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2926 E1000_WRITE_FLUSH(&sc->hw); 2927 2928 for (i = 0; i < sc->intr_cnt; ++i) 2929 lwkt_serialize_handler_disable(sc->intr_data[i].intr_serialize); 2930 } 2931 2932 /* 2933 * Bit of a misnomer, what this really means is 2934 * to enable OS management of the system... 
aka 2935 * to disable special hardware management features 2936 */ 2937 static void 2938 igb_get_mgmt(struct igb_softc *sc) 2939 { 2940 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2941 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 2942 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2943 2944 /* disable hardware interception of ARP */ 2945 manc &= ~E1000_MANC_ARP_EN; 2946 2947 /* enable receiving management packets to the host */ 2948 manc |= E1000_MANC_EN_MNG2HOST; 2949 manc2h |= 1 << 5; /* Mng Port 623 */ 2950 manc2h |= 1 << 6; /* Mng Port 664 */ 2951 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 2952 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2953 } 2954 } 2955 2956 /* 2957 * Give control back to the hardware management controller 2958 * if there is one. 2959 */ 2960 static void 2961 igb_rel_mgmt(struct igb_softc *sc) 2962 { 2963 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2964 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2965 2966 /* Re-enable hardware interception of ARP */ 2967 manc |= E1000_MANC_ARP_EN; 2968 manc &= ~E1000_MANC_EN_MNG2HOST; 2969 2970 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2971 } 2972 } 2973 2974 /* 2975 * Sets CTRL_EXT:DRV_LOAD bit. 2976 * 2977 * For ASF and Pass Through versions of f/w this means that 2978 * the driver is loaded. 2979 */ 2980 static void 2981 igb_get_hw_control(struct igb_softc *sc) 2982 { 2983 uint32_t ctrl_ext; 2984 2985 if (sc->vf_ifp) 2986 return; 2987 2988 /* Let firmware know the driver has taken over */ 2989 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2990 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2991 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2992 } 2993 2994 /* 2995 * Resets CTRL_EXT:DRV_LOAD bit. 2996 * 2997 * For ASF and Pass Through versions of f/w this means that the 2998 * driver is no longer loaded. 2999 */ 3000 static void 3001 igb_rel_hw_control(struct igb_softc *sc) 3002 { 3003 uint32_t ctrl_ext; 3004 3005 if (sc->vf_ifp) 3006 return; 3007 3008 /* Let the firmware take over control of the h/w */ 3009 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3010 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3011 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3012 } 3013 3014 static boolean_t 3015 igb_is_valid_ether_addr(const uint8_t *addr) 3016 { 3017 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3018 3019 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3020 return FALSE; 3021 return TRUE; 3022 } 3023 3024 /* 3025 * Enable PCI Wake On LAN capability 3026 */ 3027 static void 3028 igb_enable_wol(struct igb_softc *sc) 3029 { 3030 device_t dev = sc->dev; 3031 int error = 0; 3032 uint32_t pmc, ctrl; 3033 uint16_t status; 3034 3035 if (pci_find_extcap(dev, PCIY_PMG, &pmc) != 0) { 3036 device_printf(dev, "no PMG\n"); 3037 return; 3038 } 3039 3040 /* 3041 * Set the type of wakeup. 3042 */ 3043 sc->wol &= ~(E1000_WUFC_EX | E1000_WUFC_MC); 3044 if ((sc->wol & (E1000_WUFC_EX | E1000_WUFC_MAG | E1000_WUFC_MC)) == 0) 3045 goto pme; 3046 3047 /* 3048 * Advertise the wakeup capabilities. 3049 */ 3050 ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL); 3051 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3); 3052 E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl); 3053 3054 /* 3055 * Keep the laser running on Fiber adapters.
3056 */ 3057 if (sc->hw.phy.media_type == e1000_media_type_fiber || 3058 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 3059 uint32_t ctrl_ext; 3060 3061 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3062 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; 3063 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, ctrl_ext); 3064 } 3065 3066 error = igb_enable_phy_wol(sc); 3067 if (error) 3068 goto pme; 3069 3070 /* XXX will this happen? ich/pch specific. */ 3071 if (sc->hw.phy.type == e1000_phy_igp_3) 3072 e1000_igp3_phy_powerdown_workaround_ich8lan(&sc->hw); 3073 3074 pme: 3075 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2); 3076 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3077 if (!error) 3078 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3079 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2); 3080 } 3081 3082 /* 3083 * WOL in the newer chipset interfaces (pchlan) 3084 * requires things to be copied into the PHY 3085 */ 3086 static int 3087 igb_enable_phy_wol(struct igb_softc *sc) 3088 { 3089 struct e1000_hw *hw = &sc->hw; 3090 uint32_t mreg; 3091 uint16_t preg; 3092 int ret = 0, i; 3093 3094 /* Copy MAC RARs to PHY RARs */ 3095 e1000_copy_rx_addrs_to_phy_ich8lan(hw); 3096 3097 /* Copy MAC MTA to PHY MTA */ 3098 for (i = 0; i < hw->mac.mta_reg_count; i++) { 3099 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); 3100 e1000_write_phy_reg(hw, BM_MTA(i), (uint16_t)(mreg & 0xFFFF)); 3101 e1000_write_phy_reg(hw, BM_MTA(i) + 1, 3102 (uint16_t)((mreg >> 16) & 0xFFFF)); 3103 } 3104 3105 /* Configure PHY Rx Control register */ 3106 e1000_read_phy_reg(hw, BM_RCTL, &preg); 3107 mreg = E1000_READ_REG(hw, E1000_RCTL); 3108 if (mreg & E1000_RCTL_UPE) 3109 preg |= BM_RCTL_UPE; 3110 if (mreg & E1000_RCTL_MPE) 3111 preg |= BM_RCTL_MPE; 3112 preg &= ~(BM_RCTL_MO_MASK); 3113 if (mreg & E1000_RCTL_MO_3) { 3114 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 3115 << BM_RCTL_MO_SHIFT); 3116 } 3117 if (mreg & E1000_RCTL_BAM) 3118 preg |= BM_RCTL_BAM; 3119 if (mreg & E1000_RCTL_PMCF) 3120 preg |= BM_RCTL_PMCF; 3121 mreg = E1000_READ_REG(hw, E1000_CTRL); 3122 if (mreg & E1000_CTRL_RFCE) 3123 preg |= BM_RCTL_RFCE; 3124 e1000_write_phy_reg(&sc->hw, BM_RCTL, preg); 3125 3126 /* Enable PHY wakeup in MAC register.
*/ 3127 E1000_WRITE_REG(hw, E1000_WUC, 3128 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN | E1000_WUC_APME); 3129 E1000_WRITE_REG(hw, E1000_WUFC, sc->wol); 3130 3131 /* Configure and enable PHY wakeup in PHY registers */ 3132 e1000_write_phy_reg(hw, BM_WUFC, sc->wol); 3133 e1000_write_phy_reg(hw, BM_WUC, E1000_WUC_PME_EN); 3134 /* Activate PHY wakeup */ 3135 ret = hw->phy.ops.acquire(hw); 3136 if (ret) { 3137 if_printf(&sc->arpcom.ac_if, "Could not acquire PHY\n"); 3138 return ret; 3139 } 3140 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 3141 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); 3142 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg); 3143 if (ret) { 3144 if_printf(&sc->arpcom.ac_if, "Could not read PHY page 769\n"); 3145 goto out; 3146 } 3147 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 3148 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg); 3149 if (ret) { 3150 if_printf(&sc->arpcom.ac_if, 3151 "Could not set PHY Host Wakeup bit\n"); 3152 } 3153 out: 3154 hw->phy.ops.release(hw); 3155 return ret; 3156 } 3157 3158 static void 3159 igb_update_stats_counters(struct igb_softc *sc) 3160 { 3161 struct e1000_hw *hw = &sc->hw; 3162 struct e1000_hw_stats *stats; 3163 struct ifnet *ifp = &sc->arpcom.ac_if; 3164 3165 /* 3166 * The virtual function adapter has only a 3167 * small controlled set of stats, do only 3168 * those and return. 3169 */ 3170 if (sc->vf_ifp) { 3171 igb_update_vf_stats_counters(sc); 3172 return; 3173 } 3174 stats = sc->stats; 3175 3176 if (sc->hw.phy.media_type == e1000_media_type_copper || 3177 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 3178 stats->symerrs += 3179 E1000_READ_REG(hw,E1000_SYMERRS); 3180 stats->sec += E1000_READ_REG(hw, E1000_SEC); 3181 } 3182 3183 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 3184 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 3185 stats->scc += E1000_READ_REG(hw, E1000_SCC); 3186 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 3187 3188 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 3189 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 3190 stats->colc += E1000_READ_REG(hw, E1000_COLC); 3191 stats->dc += E1000_READ_REG(hw, E1000_DC); 3192 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 3193 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 3194 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 3195 3196 /* 3197 * For watchdog management we need to know if we have been 3198 * paused during the last interval, so capture that here. 3199 */ 3200 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 3201 stats->xoffrxc += sc->pause_frames; 3202 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 3203 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 3204 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 3205 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 3206 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 3207 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 3208 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 3209 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 3210 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 3211 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 3212 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 3213 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 3214 3215 /* For the 64-bit byte counters the low dword must be read first. 
*/ 3216 /* Both registers clear on the read of the high dword */ 3217 3218 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 3219 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 3220 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 3221 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 3222 3223 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 3224 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 3225 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 3226 stats->roc += E1000_READ_REG(hw, E1000_ROC); 3227 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 3228 3229 stats->mgprc += E1000_READ_REG(hw, E1000_MGTPRC); 3230 stats->mgpdc += E1000_READ_REG(hw, E1000_MGTPDC); 3231 stats->mgptc += E1000_READ_REG(hw, E1000_MGTPTC); 3232 3233 stats->tor += E1000_READ_REG(hw, E1000_TORL) + 3234 ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); 3235 stats->tot += E1000_READ_REG(hw, E1000_TOTL) + 3236 ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); 3237 3238 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 3239 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 3240 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 3241 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 3242 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 3243 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 3244 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 3245 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 3246 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 3247 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 3248 3249 /* Interrupt Counts */ 3250 3251 stats->iac += E1000_READ_REG(hw, E1000_IAC); 3252 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 3253 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 3254 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 3255 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 3256 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 3257 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 3258 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 3259 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 3260 3261 /* Host to Card Statistics */ 3262 3263 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 3264 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 3265 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 3266 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 3267 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 3268 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 3269 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 3270 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 3271 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 3272 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 3273 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 3274 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 3275 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 3276 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 3277 3278 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 3279 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 3280 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 3281 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 3282 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 3283 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 3284 3285 IFNET_STAT_SET(ifp, collisions, stats->colc); 3286 3287 /* Rx Errors */ 3288 IFNET_STAT_SET(ifp, ierrors, 3289 stats->rxerrc + stats->crcerrs + stats->algnerrc + 3290 stats->ruc + stats->roc + stats->mpc + stats->cexterr); 3291 3292 /* Tx Errors */ 3293 IFNET_STAT_SET(ifp, oerrors, 3294 stats->ecol + stats->latecol + 
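/* excessive plus late collisions, plus driver watchdog resets: */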
sc->watchdog_events); 3295 3296 /* Driver specific counters */ 3297 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 3298 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 3299 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 3300 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 3301 sc->packet_buf_alloc_tx = 3302 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); 3303 sc->packet_buf_alloc_rx = 3304 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 3305 } 3306 3307 static void 3308 igb_vf_init_stats(struct igb_softc *sc) 3309 { 3310 struct e1000_hw *hw = &sc->hw; 3311 struct e1000_vf_stats *stats; 3312 3313 stats = sc->stats; 3314 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 3315 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 3316 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 3317 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 3318 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 3319 } 3320 3321 static void 3322 igb_update_vf_stats_counters(struct igb_softc *sc) 3323 { 3324 struct e1000_hw *hw = &sc->hw; 3325 struct e1000_vf_stats *stats; 3326 3327 if (sc->link_speed == 0) 3328 return; 3329 3330 stats = sc->stats; 3331 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 3332 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 3333 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 3334 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 3335 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 3336 } 3337 3338 #ifdef IFPOLL_ENABLE 3339 3340 static void 3341 igb_npoll_status(struct ifnet *ifp) 3342 { 3343 struct igb_softc *sc = ifp->if_softc; 3344 uint32_t reg_icr; 3345 3346 ASSERT_SERIALIZED(&sc->main_serialize); 3347 3348 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3349 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3350 sc->hw.mac.get_link_status = 1; 3351 igb_update_link_status(sc); 3352 } 3353 } 3354 3355 static void 3356 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 3357 { 3358 struct igb_tx_ring *txr = arg; 3359 3360 ASSERT_SERIALIZED(&txr->tx_serialize); 3361 igb_tx_intr(txr, *(txr->tx_hdr)); 3362 igb_try_txgc(txr, 1); 3363 } 3364 3365 static void 3366 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3367 { 3368 struct igb_rx_ring *rxr = arg; 3369 3370 ASSERT_SERIALIZED(&rxr->rx_serialize); 3371 3372 igb_rxeof(rxr, cycle); 3373 } 3374 3375 static void 3376 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3377 { 3378 struct igb_softc *sc = ifp->if_softc; 3379 int i, txr_cnt, rxr_cnt; 3380 3381 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3382 3383 if (info) { 3384 int cpu; 3385 3386 info->ifpi_status.status_func = igb_npoll_status; 3387 info->ifpi_status.serializer = &sc->main_serialize; 3388 3389 txr_cnt = igb_get_txring_inuse(sc, TRUE); 3390 for (i = 0; i < txr_cnt; ++i) { 3391 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3392 3393 cpu = if_ringmap_cpumap(sc->tx_rmap, i); 3394 KKASSERT(cpu < netisr_ncpus); 3395 info->ifpi_tx[cpu].poll_func = igb_npoll_tx; 3396 info->ifpi_tx[cpu].arg = txr; 3397 info->ifpi_tx[cpu].serializer = &txr->tx_serialize; 3398 ifsq_set_cpuid(txr->ifsq, cpu); 3399 } 3400 3401 rxr_cnt = igb_get_rxring_inuse(sc, TRUE); 3402 for (i = 0; i < rxr_cnt; ++i) { 3403 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3404 3405 cpu = if_ringmap_cpumap(sc->rx_rmap, i); 3406 KKASSERT(cpu < netisr_ncpus); 3407 info->ifpi_rx[cpu].poll_func = igb_npoll_rx; 3408 info->ifpi_rx[cpu].arg = rxr; 3409 info->ifpi_rx[cpu].serializer = &rxr->rx_serialize; 3410 } 3411 } else { 3412 for (i = 0; i 
< sc->tx_ring_cnt; ++i) { 3413 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3414 3415 ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid); 3416 } 3417 } 3418 if (ifp->if_flags & IFF_RUNNING) 3419 igb_init(sc); 3420 } 3421 3422 #endif /* IFPOLL_ENABLE */ 3423 3424 static void 3425 igb_intr(void *xsc) 3426 { 3427 struct igb_softc *sc = xsc; 3428 struct ifnet *ifp = &sc->arpcom.ac_if; 3429 uint32_t eicr; 3430 3431 ASSERT_SERIALIZED(&sc->main_serialize); 3432 3433 eicr = E1000_READ_REG(&sc->hw, E1000_EICR); 3434 3435 if (eicr == 0) 3436 return; 3437 3438 if (ifp->if_flags & IFF_RUNNING) { 3439 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3440 int i; 3441 3442 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3443 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3444 3445 if (eicr & rxr->rx_intr_mask) { 3446 lwkt_serialize_enter(&rxr->rx_serialize); 3447 igb_rxeof(rxr, -1); 3448 lwkt_serialize_exit(&rxr->rx_serialize); 3449 } 3450 } 3451 3452 if (eicr & txr->tx_intr_mask) { 3453 lwkt_serialize_enter(&txr->tx_serialize); 3454 igb_tx_intr(txr, *(txr->tx_hdr)); 3455 lwkt_serialize_exit(&txr->tx_serialize); 3456 } 3457 } 3458 3459 if (eicr & E1000_EICR_OTHER) { 3460 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3461 3462 /* Link status change */ 3463 if (icr & E1000_ICR_LSC) { 3464 sc->hw.mac.get_link_status = 1; 3465 igb_update_link_status(sc); 3466 } 3467 } 3468 3469 /* 3470 * Reading EICR has the side effect of clearing the interrupt mask, 3471 * so all interrupts need to be re-enabled here. 3472 */ 3473 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 3474 } 3475 3476 static void 3477 igb_intr_shared(void *xsc) 3478 { 3479 struct igb_softc *sc = xsc; 3480 struct ifnet *ifp = &sc->arpcom.ac_if; 3481 uint32_t reg_icr; 3482 3483 ASSERT_SERIALIZED(&sc->main_serialize); 3484 3485 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3486 3487 /* Hot eject? */ 3488 if (reg_icr == 0xffffffff) 3489 return; 3490 3491 /* Definitely not our interrupt.
*/ 3492 if (reg_icr == 0x0) 3493 return; 3494 3495 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) 3496 return; 3497 3498 if (ifp->if_flags & IFF_RUNNING) { 3499 if (reg_icr & 3500 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 3501 int i; 3502 3503 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3504 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3505 3506 lwkt_serialize_enter(&rxr->rx_serialize); 3507 igb_rxeof(rxr, -1); 3508 lwkt_serialize_exit(&rxr->rx_serialize); 3509 } 3510 } 3511 3512 if (reg_icr & E1000_ICR_TXDW) { 3513 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3514 3515 lwkt_serialize_enter(&txr->tx_serialize); 3516 igb_tx_intr(txr, *(txr->tx_hdr)); 3517 lwkt_serialize_exit(&txr->tx_serialize); 3518 } 3519 } 3520 3521 /* Link status change */ 3522 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3523 sc->hw.mac.get_link_status = 1; 3524 igb_update_link_status(sc); 3525 } 3526 3527 if (reg_icr & E1000_ICR_RXO) 3528 sc->rx_overruns++; 3529 } 3530 3531 static int 3532 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp, 3533 int *segs_used, int *idx) 3534 { 3535 bus_dma_segment_t segs[IGB_MAX_SCATTER]; 3536 bus_dmamap_t map; 3537 struct igb_tx_buf *tx_buf, *tx_buf_mapped; 3538 union e1000_adv_tx_desc *txd = NULL; 3539 struct mbuf *m_head = *m_headp; 3540 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0; 3541 int maxsegs, nsegs, i, j, error; 3542 uint32_t hdrlen = 0; 3543 3544 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3545 error = igb_tso_pullup(txr, m_headp); 3546 if (error) 3547 return error; 3548 m_head = *m_headp; 3549 } 3550 3551 /* Set basic descriptor constants */ 3552 cmd_type_len |= E1000_ADVTXD_DTYP_DATA; 3553 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; 3554 if (m_head->m_flags & M_VLANTAG) 3555 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 3556 3557 /* 3558 * Map the packet for DMA. 3559 */ 3560 tx_buf = &txr->tx_buf[txr->next_avail_desc]; 3561 tx_buf_mapped = tx_buf; 3562 map = tx_buf->map; 3563 3564 maxsegs = txr->tx_avail - IGB_TX_RESERVED; 3565 if (maxsegs > IGB_MAX_SCATTER) 3566 maxsegs = IGB_MAX_SCATTER; 3567 3568 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp, 3569 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3570 if (error) { 3571 if (error == ENOBUFS) 3572 txr->sc->mbuf_defrag_failed++; 3573 else 3574 txr->sc->no_tx_dma_setup++; 3575 3576 m_freem(*m_headp); 3577 *m_headp = NULL; 3578 return error; 3579 } 3580 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE); 3581 3582 m_head = *m_headp; 3583 3584 /* 3585 * Set up the TX context descriptor, if any hardware offloading is 3586 * needed. This includes CSUM, VLAN, and TSO. It will consume one 3587 * TX descriptor. 3588 * 3589 * Unlike these chips' predecessors (em/emx), TX context descriptor 3590 * will _not_ interfere TX data fetching pipelining. 
3591 */ 3592 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3593 igb_tso_ctx(txr, m_head, &hdrlen); 3594 cmd_type_len |= E1000_ADVTXD_DCMD_TSE; 3595 olinfo_status |= E1000_TXD_POPTS_IXSM << 8; 3596 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3597 txr->tx_nsegs++; 3598 (*segs_used)++; 3599 } else if (igb_txcsum_ctx(txr, m_head)) { 3600 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3601 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8); 3602 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) 3603 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8); 3604 txr->tx_nsegs++; 3605 (*segs_used)++; 3606 } 3607 3608 *segs_used += nsegs; 3609 txr->tx_nsegs += nsegs; 3610 if (txr->tx_nsegs >= txr->intr_nsegs) { 3611 /* 3612 * Report Status (RS) is turned on every intr_nsegs 3613 * descriptors (roughly). 3614 */ 3615 txr->tx_nsegs = 0; 3616 cmd_rs = E1000_ADVTXD_DCMD_RS; 3617 } 3618 3619 /* Calculate payload length */ 3620 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) 3621 << E1000_ADVTXD_PAYLEN_SHIFT); 3622 3623 /* 3624 * 82575 needs the TX context index added; the queue 3625 * index is used as TX context index here. 3626 */ 3627 if (txr->sc->hw.mac.type == e1000_82575) 3628 olinfo_status |= txr->me << 4; 3629 3630 /* Set up our transmit descriptors */ 3631 i = txr->next_avail_desc; 3632 for (j = 0; j < nsegs; j++) { 3633 bus_size_t seg_len; 3634 bus_addr_t seg_addr; 3635 3636 tx_buf = &txr->tx_buf[i]; 3637 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; 3638 seg_addr = segs[j].ds_addr; 3639 seg_len = segs[j].ds_len; 3640 3641 txd->read.buffer_addr = htole64(seg_addr); 3642 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); 3643 txd->read.olinfo_status = htole32(olinfo_status); 3644 if (++i == txr->num_tx_desc) 3645 i = 0; 3646 tx_buf->m_head = NULL; 3647 } 3648 3649 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n")); 3650 txr->next_avail_desc = i; 3651 txr->tx_avail -= nsegs; 3652 txr->tx_nmbuf++; 3653 3654 tx_buf->m_head = m_head; 3655 tx_buf_mapped->map = tx_buf->map; 3656 tx_buf->map = map; 3657 3658 /* 3659 * Last Descriptor of Packet needs End Of Packet (EOP) 3660 */ 3661 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs); 3662 3663 /* 3664 * Defer TDT updating until enough descriptors are set up 3665 */ 3666 *idx = i; 3667 #ifdef IGB_TSS_DEBUG 3668 ++txr->tx_packets; 3669 #endif 3670 3671 return 0; 3672 } 3673 3674 static void 3675 igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 3676 { 3677 struct igb_softc *sc = ifp->if_softc; 3678 struct igb_tx_ring *txr = ifsq_get_priv(ifsq); 3679 struct mbuf *m_head; 3680 int idx = -1, nsegs = 0; 3681 3682 KKASSERT(txr->ifsq == ifsq); 3683 ASSERT_SERIALIZED(&txr->tx_serialize); 3684 3685 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 3686 return; 3687 3688 if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) { 3689 ifsq_purge(ifsq); 3690 return; 3691 } 3692 3693 while (!ifsq_is_empty(ifsq)) { 3694 if (txr->tx_avail <= IGB_MAX_SCATTER + IGB_TX_RESERVED) { 3695 ifsq_set_oactive(ifsq); 3696 /* Set watchdog on */ 3697 ifsq_watchdog_set_count(&txr->tx_watchdog, 5); 3698 break; 3699 } 3700 3701 m_head = ifsq_dequeue(ifsq); 3702 if (m_head == NULL) 3703 break; 3704 3705 if (igb_encap(txr, &m_head, &nsegs, &idx)) { 3706 IFNET_STAT_INC(ifp, oerrors, 1); 3707 continue; 3708 } 3709 3710 /* 3711 * TX interrupts are aggressively aggregated, so increasing 3712 * opackets at TX interrupt time will make the opackets 3713 * statistics vastly inaccurate; we do the opackets increment 3714 * now.
3715 */ 3716 IFNET_STAT_INC(ifp, opackets, 1); 3717 3718 if (nsegs >= txr->wreg_nsegs) { 3719 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); 3720 idx = -1; 3721 nsegs = 0; 3722 } 3723 3724 /* Send a copy of the frame to the BPF listener */ 3725 ETHER_BPF_MTAP(ifp, m_head); 3726 } 3727 if (idx >= 0) 3728 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); 3729 txr->tx_running = IGB_TX_RUNNING; 3730 } 3731 3732 static void 3733 igb_watchdog(struct ifaltq_subque *ifsq) 3734 { 3735 struct igb_tx_ring *txr = ifsq_get_priv(ifsq); 3736 struct ifnet *ifp = ifsq_get_ifp(ifsq); 3737 struct igb_softc *sc = ifp->if_softc; 3738 int i; 3739 3740 KKASSERT(txr->ifsq == ifsq); 3741 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3742 3743 /* 3744 * If flow control has paused us since last checking 3745 * it invalidates the watchdog timing, so dont run it. 3746 */ 3747 if (sc->pause_frames) { 3748 sc->pause_frames = 0; 3749 ifsq_watchdog_set_count(&txr->tx_watchdog, 5); 3750 return; 3751 } 3752 3753 if_printf(ifp, "Watchdog timeout -- resetting\n"); 3754 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, 3755 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)), 3756 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me))); 3757 if_printf(ifp, "TX(%d) desc avail = %d, " 3758 "Next TX to Clean = %d\n", 3759 txr->me, txr->tx_avail, txr->next_to_clean); 3760 3761 IFNET_STAT_INC(ifp, oerrors, 1); 3762 sc->watchdog_events++; 3763 3764 igb_init(sc); 3765 for (i = 0; i < sc->tx_ring_inuse; ++i) 3766 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 3767 } 3768 3769 static void 3770 igb_set_eitr(struct igb_softc *sc, int idx, int rate) 3771 { 3772 uint32_t eitr = 0; 3773 3774 if (rate > 0) { 3775 if (sc->hw.mac.type == e1000_82575) { 3776 eitr = 1000000000 / 256 / rate; 3777 /* 3778 * NOTE: 3779 * Document is wrong on the 2 bits left shift 3780 */ 3781 } else { 3782 eitr = 1000000 / rate; 3783 eitr <<= IGB_EITR_INTVL_SHIFT; 3784 } 3785 3786 if (eitr == 0) { 3787 /* Don't disable it */ 3788 eitr = 1 << IGB_EITR_INTVL_SHIFT; 3789 } else if (eitr > IGB_EITR_INTVL_MASK) { 3790 /* Don't allow it to be too large */ 3791 eitr = IGB_EITR_INTVL_MASK; 3792 } 3793 } 3794 if (sc->hw.mac.type == e1000_82575) 3795 eitr |= eitr << 16; 3796 else 3797 eitr |= E1000_EITR_CNT_IGNR; 3798 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr); 3799 } 3800 3801 static void 3802 igb_add_intr_rate_sysctl(struct igb_softc *sc, int use, 3803 const char *name, const char *desc) 3804 { 3805 int i; 3806 3807 for (i = 0; i < sc->intr_cnt; ++i) { 3808 if (sc->intr_data[i].intr_use == use) { 3809 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 3810 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), 3811 OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW, 3812 sc, use, igb_sysctl_intr_rate, "I", desc); 3813 break; 3814 } 3815 } 3816 } 3817 3818 static int 3819 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3820 { 3821 struct igb_softc *sc = (void *)arg1; 3822 int use = arg2; 3823 struct ifnet *ifp = &sc->arpcom.ac_if; 3824 int error, rate, i; 3825 struct igb_intr_data *intr; 3826 3827 rate = 0; 3828 for (i = 0; i < sc->intr_cnt; ++i) { 3829 intr = &sc->intr_data[i]; 3830 if (intr->intr_use == use) { 3831 rate = intr->intr_rate; 3832 break; 3833 } 3834 } 3835 3836 error = sysctl_handle_int(oidp, &rate, 0, req); 3837 if (error || req->newptr == NULL) 3838 return error; 3839 if (rate <= 0) 3840 return EINVAL; 3841 3842 ifnet_serialize_all(ifp); 3843 3844 for (i = 0; i < sc->intr_cnt; ++i) { 3845 intr = &sc->intr_data[i]; 3846 if (intr->intr_use == use && intr->intr_rate != rate) { 3847 
intr->intr_rate = rate; 3848 if (ifp->if_flags & IFF_RUNNING) 3849 igb_set_eitr(sc, i, rate); 3850 } 3851 } 3852 3853 ifnet_deserialize_all(ifp); 3854 3855 return error; 3856 } 3857 3858 static int 3859 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3860 { 3861 struct igb_softc *sc = (void *)arg1; 3862 struct ifnet *ifp = &sc->arpcom.ac_if; 3863 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3864 int error, nsegs; 3865 3866 nsegs = txr->intr_nsegs; 3867 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3868 if (error || req->newptr == NULL) 3869 return error; 3870 if (nsegs <= 0) 3871 return EINVAL; 3872 3873 ifnet_serialize_all(ifp); 3874 3875 if (nsegs >= txr->num_tx_desc - IGB_MAX_SCATTER - IGB_TX_RESERVED) { 3876 error = EINVAL; 3877 } else { 3878 int i; 3879 3880 error = 0; 3881 for (i = 0; i < sc->tx_ring_cnt; ++i) 3882 sc->tx_rings[i].intr_nsegs = nsegs; 3883 } 3884 3885 ifnet_deserialize_all(ifp); 3886 3887 return error; 3888 } 3889 3890 static int 3891 igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3892 { 3893 struct igb_softc *sc = (void *)arg1; 3894 struct ifnet *ifp = &sc->arpcom.ac_if; 3895 int error, nsegs, i; 3896 3897 nsegs = sc->rx_rings[0].wreg_nsegs; 3898 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3899 if (error || req->newptr == NULL) 3900 return error; 3901 3902 ifnet_serialize_all(ifp); 3903 for (i = 0; i < sc->rx_ring_cnt; ++i) 3904 sc->rx_rings[i].wreg_nsegs = nsegs; 3905 ifnet_deserialize_all(ifp); 3906 3907 return 0; 3908 } 3909 3910 static int 3911 igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3912 { 3913 struct igb_softc *sc = (void *)arg1; 3914 struct ifnet *ifp = &sc->arpcom.ac_if; 3915 int error, nsegs, i; 3916 3917 nsegs = sc->tx_rings[0].wreg_nsegs; 3918 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3919 if (error || req->newptr == NULL) 3920 return error; 3921 3922 ifnet_serialize_all(ifp); 3923 for (i = 0; i < sc->tx_ring_cnt; ++i) 3924 sc->tx_rings[i].wreg_nsegs = nsegs; 3925 ifnet_deserialize_all(ifp); 3926 3927 return 0; 3928 } 3929 3930 static void 3931 igb_init_intr(struct igb_softc *sc) 3932 { 3933 int i; 3934 3935 igb_set_intr_mask(sc); 3936 3937 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) 3938 igb_init_unshared_intr(sc); 3939 3940 for (i = 0; i < sc->intr_cnt; ++i) 3941 igb_set_eitr(sc, i, sc->intr_data[i].intr_rate); 3942 } 3943 3944 static void 3945 igb_init_unshared_intr(struct igb_softc *sc) 3946 { 3947 struct e1000_hw *hw = &sc->hw; 3948 const struct igb_rx_ring *rxr; 3949 const struct igb_tx_ring *txr; 3950 uint32_t ivar, index; 3951 int i; 3952 3953 /* 3954 * Enable extended mode 3955 */ 3956 if (sc->hw.mac.type != e1000_82575) { 3957 uint32_t gpie; 3958 int ivar_max; 3959 3960 gpie = E1000_GPIE_NSICR; 3961 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3962 gpie |= E1000_GPIE_MSIX_MODE | 3963 E1000_GPIE_EIAME | 3964 E1000_GPIE_PBA; 3965 } 3966 E1000_WRITE_REG(hw, E1000_GPIE, gpie); 3967 3968 /* 3969 * Clear IVARs 3970 */ 3971 switch (sc->hw.mac.type) { 3972 case e1000_82576: 3973 ivar_max = IGB_MAX_IVAR_82576; 3974 break; 3975 3976 case e1000_82580: 3977 ivar_max = IGB_MAX_IVAR_82580; 3978 break; 3979 3980 case e1000_i350: 3981 ivar_max = IGB_MAX_IVAR_I350; 3982 break; 3983 3984 case e1000_i354: 3985 ivar_max = IGB_MAX_IVAR_I354; 3986 break; 3987 3988 case e1000_vfadapt: 3989 case e1000_vfadapt_i350: 3990 ivar_max = IGB_MAX_IVAR_VF; 3991 break; 3992 3993 case e1000_i210: 3994 ivar_max = IGB_MAX_IVAR_I210; 3995 break; 3996 3997 case e1000_i211: 3998 ivar_max = IGB_MAX_IVAR_I211; 3999 break; 4000 4001 default: 4002 panic("unknown mac type 
%d\n", sc->hw.mac.type); 4003 } 4004 for (i = 0; i < ivar_max; ++i) 4005 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 4006 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 4007 } else { 4008 uint32_t tmp; 4009 4010 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 4011 ("82575 w/ MSI-X")); 4012 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 4013 tmp |= E1000_CTRL_EXT_IRCA; 4014 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 4015 } 4016 4017 /* 4018 * Map TX/RX interrupts to EICR 4019 */ 4020 switch (sc->hw.mac.type) { 4021 case e1000_82580: 4022 case e1000_i350: 4023 case e1000_i354: 4024 case e1000_vfadapt: 4025 case e1000_vfadapt_i350: 4026 case e1000_i210: 4027 case e1000_i211: 4028 /* RX entries */ 4029 for (i = 0; i < sc->rx_ring_inuse; ++i) { 4030 rxr = &sc->rx_rings[i]; 4031 4032 index = i >> 1; 4033 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 4034 4035 if (i & 1) { 4036 ivar &= 0xff00ffff; 4037 ivar |= 4038 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 4039 } else { 4040 ivar &= 0xffffff00; 4041 ivar |= 4042 (rxr->rx_intr_vec | E1000_IVAR_VALID); 4043 } 4044 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 4045 } 4046 /* TX entries */ 4047 for (i = 0; i < sc->tx_ring_inuse; ++i) { 4048 txr = &sc->tx_rings[i]; 4049 4050 index = i >> 1; 4051 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 4052 4053 if (i & 1) { 4054 ivar &= 0x00ffffff; 4055 ivar |= 4056 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 4057 } else { 4058 ivar &= 0xffff00ff; 4059 ivar |= 4060 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 4061 } 4062 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 4063 } 4064 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 4065 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 4066 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 4067 } 4068 break; 4069 4070 case e1000_82576: 4071 /* RX entries */ 4072 for (i = 0; i < sc->rx_ring_inuse; ++i) { 4073 rxr = &sc->rx_rings[i]; 4074 4075 index = i & 0x7; /* Each IVAR has two entries */ 4076 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 4077 4078 if (i < 8) { 4079 ivar &= 0xffffff00; 4080 ivar |= 4081 (rxr->rx_intr_vec | E1000_IVAR_VALID); 4082 } else { 4083 ivar &= 0xff00ffff; 4084 ivar |= 4085 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 4086 } 4087 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 4088 } 4089 /* TX entries */ 4090 for (i = 0; i < sc->tx_ring_inuse; ++i) { 4091 txr = &sc->tx_rings[i]; 4092 4093 index = i & 0x7; /* Each IVAR has two entries */ 4094 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 4095 4096 if (i < 8) { 4097 ivar &= 0xffff00ff; 4098 ivar |= 4099 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 4100 } else { 4101 ivar &= 0x00ffffff; 4102 ivar |= 4103 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 4104 } 4105 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 4106 } 4107 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 4108 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 4109 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 4110 } 4111 break; 4112 4113 case e1000_82575: 4114 /* 4115 * Enable necessary interrupt bits. 4116 * 4117 * The name of the register is confusing; in addition to 4118 * configuring the first vector of MSI-X, it also configures 4119 * which bits of EICR could be set by the hardware even when 4120 * MSI or line interrupt is used; it thus controls interrupt 4121 * generation. It MUST be configured explicitly; the default 4122 * value mentioned in the datasheet is wrong: RX queue0 and 4123 * TX queue0 are NOT enabled by default. 

static int
igb_setup_intr(struct igb_softc *sc)
{
	int i;

	for (i = 0; i < sc->intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];
		int error;

		error = bus_setup_intr_descr(sc->dev, intr->intr_res,
		    INTR_MPSAFE, intr->intr_func, intr->intr_funcarg,
		    &intr->intr_hand, intr->intr_serialize, intr->intr_desc);
		if (error) {
			device_printf(sc->dev, "can't setup %dth intr\n", i);
			igb_teardown_intr(sc, i);
			return error;
		}
	}
	return 0;
}

static void
igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_vec0, int intr_vecmax)
{
	if (txr->sc->hw.mac.type == e1000_82575) {
		txr->tx_intr_vec = 0;	/* unused */
		switch (txr->me) {
		case 0:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
			break;
		case 1:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
			break;
		case 2:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
			break;
		case 3:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
			break;
		default:
			panic("unsupported TX ring %d", txr->me);
		}
	} else {
		int intr_vec = *intr_vec0;

		txr->tx_intr_vec = intr_vec % intr_vecmax;
		txr->tx_intr_mask = 1 << txr->tx_intr_vec;

		*intr_vec0 = intr_vec + 1;
	}
}

static void
igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_vec0, int intr_vecmax)
{
	if (rxr->sc->hw.mac.type == e1000_82575) {
		rxr->rx_intr_vec = 0;	/* unused */
		switch (rxr->me) {
		case 0:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
			break;
		case 1:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
			break;
		case 2:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
			break;
		case 3:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
			break;
		default:
			panic("unsupported RX ring %d", rxr->me);
		}
	} else {
		int intr_vec = *intr_vec0;

		rxr->rx_intr_vec = intr_vec % intr_vecmax;
		rxr->rx_intr_mask = 1 << rxr->rx_intr_vec;

		*intr_vec0 = intr_vec + 1;
	}
}
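
/*
 * Example of the vector walk above (configuration assumed for
 * illustration): with 2 TX and 2 RX rings on a non-82575 MAC,
 * igb_alloc_intr() runs the TX rings first, so intr_vec0 advances
 * tx0 -> vec 0, tx1 -> vec 1, rx0 -> vec 2, rx1 -> vec 3, and each
 * ring's EICR mask becomes 1 << vec.  The modulo only matters when
 * there are more rings than intr_vecmax vectors, in which case
 * vectors wrap around and are shared.
 */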

static void
igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
}

static void
igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
}

static int
igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
	    slz);
}

#ifdef INVARIANTS

static void
igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
	    slz, serialized);
}

#endif	/* INVARIANTS */

static void
igb_set_intr_mask(struct igb_softc *sc)
{
	int i;

	sc->intr_mask = sc->sts_intr_mask;
	for (i = 0; i < sc->rx_ring_inuse; ++i)
		sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		sc->intr_mask |= sc->tx_rings[i].tx_intr_mask;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n",
		    sc->intr_mask);
	}
}
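
/*
 * Sketch of the composed mask (vector layout assumed for the example):
 * in the MSI/legacy setup built by igb_alloc_intr() with 2 TX and 2 RX
 * rings, TX rings take vectors 0-1 and RX rings vectors 2-3, so
 *
 *	intr_mask = E1000_EICR_OTHER |
 *	    (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)
 *
 * On 82575 the per-ring masks are the fixed E1000_EICR_{TX,RX}_QUEUEn
 * bits instead, and the result is what igb_init_unshared_intr() writes
 * to E1000_MSIXBM(0).
 */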

static int
igb_alloc_intr(struct igb_softc *sc)
{
	struct igb_tx_ring *txr;
	struct igb_intr_data *intr;
	int i, intr_vec, intr_vecmax;
	u_int intr_flags;

	igb_alloc_msix(sc);
	if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
		igb_set_ring_inuse(sc, FALSE);
		goto done;
	}

	/*
	 * Reset some settings changed by igb_alloc_msix().
	 */
	if (sc->rx_rmap_intr != NULL) {
		if_ringmap_free(sc->rx_rmap_intr);
		sc->rx_rmap_intr = NULL;
	}
	if (sc->tx_rmap_intr != NULL) {
		if_ringmap_free(sc->tx_rmap_intr);
		sc->tx_rmap_intr = NULL;
	}
	if (sc->intr_data != NULL) {
		kfree(sc->intr_data, M_DEVBUF);
		sc->intr_data = NULL;
	}
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		txr = &sc->tx_rings[i];
		txr->tx_intr_vec = 0;
		txr->tx_intr_mask = 0;
		txr->tx_intr_cpuid = -1;
	}
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		rxr->rx_intr_vec = 0;
		rxr->rx_intr_mask = 0;
		rxr->rx_txr = NULL;
	}

	sc->intr_cnt = 1;
	sc->intr_data = kmalloc(sizeof(struct igb_intr_data), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	intr = &sc->intr_data[0];

	/*
	 * Allocate MSI/legacy interrupt resource
	 */
	sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable,
	    &intr->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= IGB_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(sc->dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(sc->dev, "IRQ unshared\n");
		}
	}

	intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &intr->intr_rid, intr_flags);
	if (intr->intr_res == NULL) {
		device_printf(sc->dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return ENXIO;
	}

	intr->intr_serialize = &sc->main_serialize;
	intr->intr_cpuid = rman_get_cpuid(intr->intr_res);
	intr->intr_func = (sc->flags & IGB_FLAG_SHARED_INTR) ?
	    igb_intr_shared : igb_intr;
	intr->intr_funcarg = sc;
	intr->intr_rate = IGB_INTR_RATE;
	intr->intr_use = IGB_INTR_USE_RXTX;

	sc->tx_rings[0].tx_intr_cpuid = intr->intr_cpuid;

	/*
	 * Setup MSI/legacy interrupt mask
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		intr_vecmax = IGB_MAX_TXRXINT_82575;
		break;

	case e1000_82576:
		intr_vecmax = IGB_MAX_TXRXINT_82576;
		break;

	case e1000_82580:
		intr_vecmax = IGB_MAX_TXRXINT_82580;
		break;

	case e1000_i350:
		intr_vecmax = IGB_MAX_TXRXINT_I350;
		break;

	case e1000_i354:
		intr_vecmax = IGB_MAX_TXRXINT_I354;
		break;

	case e1000_i210:
		intr_vecmax = IGB_MAX_TXRXINT_I210;
		break;

	case e1000_i211:
		intr_vecmax = IGB_MAX_TXRXINT_I211;
		break;

	default:
		intr_vecmax = IGB_MIN_TXRXINT;
		break;
	}
	intr_vec = 0;
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_set_txintr_mask(&sc->tx_rings[i], &intr_vec, intr_vecmax);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_set_rxintr_mask(&sc->rx_rings[i], &intr_vec, intr_vecmax);
	sc->sts_intr_mask = E1000_EICR_OTHER;

	igb_set_ring_inuse(sc, FALSE);
	KKASSERT(sc->rx_ring_inuse <= IGB_MIN_RING_RSS);
	if (sc->rx_ring_inuse == IGB_MIN_RING_RSS) {
		/*
		 * Allocate RX ring map for RSS setup.
		 */
		sc->rx_rmap_intr = if_ringmap_alloc(sc->dev,
		    IGB_MIN_RING_RSS, IGB_MIN_RING_RSS);
		KASSERT(if_ringmap_count(sc->rx_rmap_intr) ==
		    sc->rx_ring_inuse, ("RX ring inuse mismatch"));
	}
done:
	igb_set_intr_mask(sc);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		txr = &sc->tx_rings[i];
		if (txr->tx_intr_cpuid < 0)
			txr->tx_intr_cpuid = 0;
	}
	return 0;
}
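
/*
 * The allocation above is a fallback ladder: igb_alloc_msix() is tried
 * first, and only if the device did not end up in PCI_INTR_TYPE_MSIX
 * does pci_alloc_1intr() pick MSI (when igb_msi_enable allows it) or a
 * legacy line interrupt.  Only the legacy case may end up sharing the
 * IRQ, which is why igb_intr_shared is chosen there.
 */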

static void
igb_free_intr(struct igb_softc *sc)
{
	if (sc->intr_data == NULL)
		return;

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		struct igb_intr_data *intr = &sc->intr_data[0];

		KKASSERT(sc->intr_cnt == 1);
		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (sc->intr_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->dev);

		kfree(sc->intr_data, M_DEVBUF);
	} else {
		igb_free_msix(sc, TRUE);
	}
}

static void
igb_teardown_intr(struct igb_softc *sc, int intr_cnt)
{
	int i;

	if (sc->intr_data == NULL)
		return;

	for (i = 0; i < intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];

		bus_teardown_intr(sc->dev, intr->intr_res, intr->intr_hand);
	}
}
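
/*
 * igb_teardown_intr() deliberately takes a count instead of using
 * sc->intr_cnt: on a partial igb_setup_intr() failure it unwinds only
 * the handlers that were actually installed.
 */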

static void
igb_alloc_msix(struct igb_softc *sc)
{
	int msix_enable, msix_cnt, msix_ring, alloc_cnt;
	int i, x, error;
	int ring_cnt, ring_cntmax;
	struct igb_intr_data *intr;
	boolean_t setup = FALSE;

	/*
	 * Don't enable MSI-X on 82575, see:
	 * 82575 specification update errata #25
	 */
	if (sc->hw.mac.type == e1000_82575)
		return;

	/* Don't enable MSI-X on VF */
	if (sc->vf_ifp)
		return;

	msix_enable = device_getenv_int(sc->dev, "msix.enable",
	    igb_msix_enable);
	if (!msix_enable)
		return;

	msix_cnt = pci_msix_count(sc->dev);
#ifdef IGB_MSIX_DEBUG
	msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt);
#endif
	if (msix_cnt <= 1) {
		/* A single MSI-X vector is no better than MSI. */
		return;
	}
	if (bootverbose)
		device_printf(sc->dev, "MSI-X count %d\n", msix_cnt);
	msix_ring = msix_cnt - 1; /* -1 for status */

	/*
	 * Configure # of RX/TX rings usable by MSI-X.
	 */
	igb_get_rxring_cnt(sc, &ring_cnt, &ring_cntmax);
	if (ring_cntmax > msix_ring)
		ring_cntmax = msix_ring;
	sc->rx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax);

	igb_get_txring_cnt(sc, &ring_cnt, &ring_cntmax);
	if (ring_cntmax > msix_ring)
		ring_cntmax = msix_ring;
	sc->tx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax);

	if_ringmap_match(sc->dev, sc->rx_rmap_intr, sc->tx_rmap_intr);
	sc->rx_ring_msix = if_ringmap_count(sc->rx_rmap_intr);
	KASSERT(sc->rx_ring_msix <= sc->rx_ring_cnt,
	    ("total RX ring count %d, MSI-X RX ring count %d",
	     sc->rx_ring_cnt, sc->rx_ring_msix));
	sc->tx_ring_msix = if_ringmap_count(sc->tx_rmap_intr);
	KASSERT(sc->tx_ring_msix <= sc->tx_ring_cnt,
	    ("total TX ring count %d, MSI-X TX ring count %d",
	     sc->tx_ring_cnt, sc->tx_ring_msix));

	/*
	 * Aggregate TX/RX MSI-X
	 */
	ring_cntmax = sc->rx_ring_msix;
	if (ring_cntmax < sc->tx_ring_msix)
		ring_cntmax = sc->tx_ring_msix;
	KASSERT(ring_cntmax <= msix_ring,
	    ("invalid ring count max %d, MSI-X count for rings %d",
	     ring_cntmax, msix_ring));

	alloc_cnt = ring_cntmax + 1; /* +1 for status */
	if (bootverbose) {
		device_printf(sc->dev, "MSI-X alloc %d, "
		    "RX ring %d, TX ring %d\n", alloc_cnt,
		    sc->rx_ring_msix, sc->tx_ring_msix);
	}

	sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR);
	sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_mem_rid, RF_ACTIVE);
	if (sc->msix_mem_res == NULL) {
		sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR_ALT);
		sc->msix_mem_res = bus_alloc_resource_any(sc->dev,
		    SYS_RES_MEMORY, &sc->msix_mem_rid, RF_ACTIVE);
		if (sc->msix_mem_res == NULL) {
			device_printf(sc->dev, "Unable to map MSI-X table\n");
			return;
		}
	}

	sc->intr_cnt = alloc_cnt;
	sc->intr_data = kmalloc(sizeof(struct igb_intr_data) * sc->intr_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (x = 0; x < sc->intr_cnt; ++x) {
		intr = &sc->intr_data[x];
		intr->intr_rid = -1;
		intr->intr_rate = IGB_INTR_RATE;
	}

	x = 0;
	for (i = 0; i < sc->rx_ring_msix; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		struct igb_tx_ring *txr = NULL;
		int cpuid, j;

		KKASSERT(x < sc->intr_cnt);
		rxr->rx_intr_vec = x;
		rxr->rx_intr_mask = 1 << rxr->rx_intr_vec;

		cpuid = if_ringmap_cpumap(sc->rx_rmap_intr, i);

		/*
		 * Try finding TX ring to piggyback.
		 */
		for (j = 0; j < sc->tx_ring_msix; ++j) {
			if (cpuid ==
			    if_ringmap_cpumap(sc->tx_rmap_intr, j)) {
				txr = &sc->tx_rings[j];
				KKASSERT(txr->tx_intr_cpuid < 0);
				break;
			}
		}
		rxr->rx_txr = txr;

		intr = &sc->intr_data[x++];
		intr->intr_serialize = &rxr->rx_serialize;
		intr->intr_cpuid = cpuid;
		KKASSERT(intr->intr_cpuid < netisr_ncpus);
		intr->intr_funcarg = rxr;
		if (txr != NULL) {
			intr->intr_func = igb_msix_rxtx;
			intr->intr_use = IGB_INTR_USE_RXTX;
			ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
			    "%s rx%dtx%d", device_get_nameunit(sc->dev),
			    i, txr->me);

			txr->tx_intr_vec = rxr->rx_intr_vec;
			txr->tx_intr_mask = rxr->rx_intr_mask;
			txr->tx_intr_cpuid = intr->intr_cpuid;
		} else {
			intr->intr_func = igb_msix_rx;
			intr->intr_rate = IGB_MSIX_RX_RATE;
			intr->intr_use = IGB_INTR_USE_RX;

			ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
			    "%s rx%d", device_get_nameunit(sc->dev), i);
		}
		intr->intr_desc = intr->intr_desc0;
	}

	for (i = 0; i < sc->tx_ring_msix; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		if (txr->tx_intr_cpuid >= 0) {
			/* Piggybacked by RX ring. */
			continue;
		}

		KKASSERT(x < sc->intr_cnt);
		txr->tx_intr_vec = x;
		txr->tx_intr_mask = 1 << txr->tx_intr_vec;

		intr = &sc->intr_data[x++];
		intr->intr_serialize = &txr->tx_serialize;
		intr->intr_func = igb_msix_tx;
		intr->intr_funcarg = txr;
		intr->intr_rate = IGB_MSIX_TX_RATE;
		intr->intr_use = IGB_INTR_USE_TX;

		intr->intr_cpuid = if_ringmap_cpumap(sc->tx_rmap_intr, i);
		KKASSERT(intr->intr_cpuid < netisr_ncpus);
		txr->tx_intr_cpuid = intr->intr_cpuid;

		ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s tx%d",
		    device_get_nameunit(sc->dev), i);
		intr->intr_desc = intr->intr_desc0;
	}

	/*
	 * Link status
	 */
	KKASSERT(x < sc->intr_cnt);
	sc->sts_msix_vec = x;
	sc->sts_intr_mask = 1 << sc->sts_msix_vec;

	intr = &sc->intr_data[x++];
	intr->intr_serialize = &sc->main_serialize;
	intr->intr_func = igb_msix_status;
	intr->intr_funcarg = sc;
	intr->intr_cpuid = 0;
	intr->intr_use = IGB_INTR_USE_STATUS;

	ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s sts",
	    device_get_nameunit(sc->dev));
	intr->intr_desc = intr->intr_desc0;

	KKASSERT(x == sc->intr_cnt);

	error = pci_setup_msix(sc->dev);
	if (error) {
		device_printf(sc->dev, "Setup MSI-X failed\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->intr_cnt; ++i) {
		intr = &sc->intr_data[i];

		error = pci_alloc_msix_vector(sc->dev, i, &intr->intr_rid,
		    intr->intr_cpuid);
		if (error) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d on cpu%d\n", i,
			    intr->intr_cpuid);
			goto back;
		}

		intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &intr->intr_rid, RF_ACTIVE);
		if (intr->intr_res == NULL) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d resource\n", i);
			error = ENOMEM;
			goto back;
		}
	}

	pci_enable_msix(sc->dev);
	sc->intr_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		igb_free_msix(sc, setup);
}
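
/*
 * Resulting vector layout, sketched for an assumed 4-core machine with
 * 4 RX and 4 TX rings whose ring maps line up CPU-for-CPU:
 *
 *	vector 0..3 -- rx0tx0 .. rx3tx3 (igb_msix_rxtx, one per cpu)
 *	vector 4    -- sts (igb_msix_status, link status, cpu0)
 *
 * A TX ring that cannot piggyback on an RX ring's CPU instead gets its
 * own trailing vector serviced by igb_msix_tx.
 */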

static void
igb_free_msix(struct igb_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->intr_cnt > 1);

	for (i = 0; i < sc->intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];

		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (intr->intr_rid >= 0)
			pci_release_msix_vector(sc->dev, intr->intr_rid);
	}
	if (setup)
		pci_teardown_msix(sc->dev);

	sc->intr_cnt = 0;
	kfree(sc->intr_data, M_DEVBUF);
	sc->intr_data = NULL;
}

static void
igb_msix_rx(void *arg)
{
	struct igb_rx_ring *rxr = arg;

	ASSERT_SERIALIZED(&rxr->rx_serialize);
	igb_rxeof(rxr, -1);

	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_msix_tx(void *arg)
{
	struct igb_tx_ring *txr = arg;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	igb_tx_intr(txr, *(txr->tx_hdr));
	E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
}

static void
igb_msix_status(void *arg)
{
	struct igb_softc *sc = arg;
	uint32_t icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
}
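
/*
 * Each MSI-X handler above re-arms only its own vector by writing its
 * mask back into EIMS on the way out.  With E1000_GPIE_EIAME/PBA set
 * in igb_init_unshared_intr() the hardware auto-masks a vector when
 * it fires (see the GPIE/EIAM description in the datasheet for the
 * exact semantics), so this EIMS write is what re-enables the vector
 * once the ring has been drained.
 */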

static void
igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
{
	sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling);
	sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling);
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "RX rings %d/%d, TX rings %d/%d\n",
		    sc->rx_ring_inuse, sc->rx_ring_cnt,
		    sc->tx_ring_inuse, sc->tx_ring_cnt);
	}
}

static int
igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWRSS(sc))
		return 1;

	if (polling)
		return sc->rx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING_RSS;
	else
		return sc->rx_ring_msix;
}

static int
igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWTSS(sc))
		return 1;

	if (polling)
		return sc->tx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING;
	else
		return sc->tx_ring_msix;
}

static int
igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) {
		struct ip *ip;

		ip = mtodoff(m, struct ip *, hoff);
		ip->ip_len = 0;
	}

	return 0;
}
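
/*
 * Common-case header arithmetic for the pullup above: an untagged
 * TCP/IPv4 frame without options has csum_lhlen = 14, csum_iphlen = 20
 * and csum_thlen = 20, so the pullup guarantees at least
 * 14 + 20 + 20 = 54 contiguous bytes in the first mbuf -- enough to
 * reach through the TCP header, which is needed e.g. when
 * IGB_TXFLAG_TSO_IPLEN0 forces ip_len to 0 in place.
 */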
4936 */ 4937 txr = rxr->rx_txr; 4938 hdr = *(txr->tx_hdr); 4939 if (hdr != txr->next_to_clean) { 4940 lwkt_serialize_enter(&txr->tx_serialize); 4941 igb_tx_intr(txr, hdr); 4942 lwkt_serialize_exit(&txr->tx_serialize); 4943 } 4944 4945 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask); 4946 } 4947 4948 static void 4949 igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling) 4950 { 4951 if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX) 4952 sc->timer_cpuid = 0; /* XXX fixed */ 4953 else 4954 sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res); 4955 } 4956 4957 static void 4958 igb_init_dmac(struct igb_softc *sc, uint32_t pba) 4959 { 4960 struct e1000_hw *hw = &sc->hw; 4961 uint32_t reg; 4962 4963 if (hw->mac.type == e1000_i211) 4964 return; 4965 4966 if (hw->mac.type > e1000_82580) { 4967 uint32_t dmac; 4968 uint16_t hwm; 4969 4970 if (sc->dma_coalesce == 0) { /* Disabling it */ 4971 reg = ~E1000_DMACR_DMAC_EN; 4972 E1000_WRITE_REG(hw, E1000_DMACR, reg); 4973 return; 4974 } else { 4975 if_printf(&sc->arpcom.ac_if, 4976 "DMA Coalescing enabled\n"); 4977 } 4978 4979 /* Set starting threshold */ 4980 E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); 4981 4982 hwm = 64 * pba - sc->max_frame_size / 16; 4983 if (hwm < 64 * (pba - 6)) 4984 hwm = 64 * (pba - 6); 4985 reg = E1000_READ_REG(hw, E1000_FCRTC); 4986 reg &= ~E1000_FCRTC_RTH_COAL_MASK; 4987 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) 4988 & E1000_FCRTC_RTH_COAL_MASK); 4989 E1000_WRITE_REG(hw, E1000_FCRTC, reg); 4990 4991 dmac = pba - sc->max_frame_size / 512; 4992 if (dmac < pba - 10) 4993 dmac = pba - 10; 4994 reg = E1000_READ_REG(hw, E1000_DMACR); 4995 reg &= ~E1000_DMACR_DMACTHR_MASK; 4996 reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT) 4997 & E1000_DMACR_DMACTHR_MASK); 4998 4999 /* transition to L0x or L1 if available..*/ 5000 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); 5001 5002 /* 5003 * Check if status is 2.5Gb backplane connection 5004 * before configuration of watchdog timer, which 5005 * is in msec values in 12.8usec intervals watchdog 5006 * timer = msec values in 32usec intervals for non 5007 * 2.5Gb connection. 5008 */ 5009 if (hw->mac.type == e1000_i354) { 5010 int status = E1000_READ_REG(hw, E1000_STATUS); 5011 5012 if ((status & E1000_STATUS_2P5_SKU) && 5013 !(status & E1000_STATUS_2P5_SKU_OVER)) 5014 reg |= ((sc->dma_coalesce * 5) >> 6); 5015 else 5016 reg |= (sc->dma_coalesce >> 5); 5017 } else { 5018 reg |= (sc->dma_coalesce >> 5); 5019 } 5020 5021 E1000_WRITE_REG(hw, E1000_DMACR, reg); 5022 5023 E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); 5024 5025 /* Set the interval before transition */ 5026 reg = E1000_READ_REG(hw, E1000_DMCTLX); 5027 if (hw->mac.type == e1000_i350) 5028 reg |= IGB_DMCTLX_DCFLUSH_DIS; 5029 /* 5030 * In 2.5Gb connection, TTLX unit is 0.4 usec, which 5031 * is 0x4*2 = 0xA. But delay is still 4 usec. 

static void
igb_setup_serialize(struct igb_softc *sc)
{
	int i = 0, j;

	/* Main + RX + TX */
	sc->serialize_cnt = 1 + sc->rx_ring_cnt + sc->tx_ring_cnt;
	sc->serializes =
	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
	        M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializes
	 *
	 * NOTE: Order is critical
	 */

	KKASSERT(i < sc->serialize_cnt);
	sc->serializes[i++] = &sc->main_serialize;

	for (j = 0; j < sc->rx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
	}

	for (j = 0; j < sc->tx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
	}

	KKASSERT(i == sc->serialize_cnt);
}

static void
igb_msix_rxtx(void *arg)
{
	struct igb_rx_ring *rxr = arg;
	struct igb_tx_ring *txr;
	int hdr;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	igb_rxeof(rxr, -1);

	/*
	 * NOTE:
	 * Since next_to_clean is only changed by igb_txeof(),
	 * which is called only in interrupt handler, the
	 * check w/o holding tx serializer is MPSAFE.
	 */
	txr = rxr->rx_txr;
	hdr = *(txr->tx_hdr);
	if (hdr != txr->next_to_clean) {
		lwkt_serialize_enter(&txr->tx_serialize);
		igb_tx_intr(txr, hdr);
		lwkt_serialize_exit(&txr->tx_serialize);
	}

	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling)
{
	if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
		sc->timer_cpuid = 0; /* XXX fixed */
	else
		sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res);
}

static void
igb_init_dmac(struct igb_softc *sc, uint32_t pba)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (hw->mac.type == e1000_i211)
		return;

	if (hw->mac.type > e1000_82580) {
		uint32_t dmac;
		uint16_t hwm;

		if (sc->dma_coalesce == 0) { /* Disabling it */
			reg = E1000_READ_REG(hw, E1000_DMACR);
			reg &= ~E1000_DMACR_DMAC_EN;
			E1000_WRITE_REG(hw, E1000_DMACR, reg);
			return;
		} else {
			if_printf(&sc->arpcom.ac_if,
			    "DMA Coalescing enabled\n");
		}

		/* Set starting threshold */
		E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);

		hwm = 64 * pba - sc->max_frame_size / 16;
		if (hwm < 64 * (pba - 6))
			hwm = 64 * (pba - 6);
		reg = E1000_READ_REG(hw, E1000_FCRTC);
		reg &= ~E1000_FCRTC_RTH_COAL_MASK;
		reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
		    & E1000_FCRTC_RTH_COAL_MASK);
		E1000_WRITE_REG(hw, E1000_FCRTC, reg);

		dmac = pba - sc->max_frame_size / 512;
		if (dmac < pba - 10)
			dmac = pba - 10;
		reg = E1000_READ_REG(hw, E1000_DMACR);
		reg &= ~E1000_DMACR_DMACTHR_MASK;
		reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK);

		/* Transition to L0s or L1 if available. */
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

		/*
		 * Check for a 2.5Gb backplane connection before
		 * configuring the watchdog timer: its field counts
		 * 12.8usec intervals on a 2.5Gb link and 32usec
		 * intervals otherwise.
		 */
		if (hw->mac.type == e1000_i354) {
			int status = E1000_READ_REG(hw, E1000_STATUS);

			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER))
				reg |= ((sc->dma_coalesce * 5) >> 6);
			else
				reg |= (sc->dma_coalesce >> 5);
		} else {
			reg |= (sc->dma_coalesce >> 5);
		}

		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		if (hw->mac.type == e1000_i350)
			reg |= IGB_DMCTLX_DCFLUSH_DIS;
		/*
		 * On a 2.5Gb connection the TTLX unit is 0.4 usec,
		 * so the same 4 usec delay needs a count of
		 * 4 / 0.4 = 10 (0xA) instead of 0x4.
		 */
		if (hw->mac.type == e1000_i354) {
			int status = E1000_READ_REG(hw, E1000_STATUS);

			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER))
				reg |= 0xA;
			else
				reg |= 0x4;
		} else {
			reg |= 0x4;
		}
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* Free space in TX packet buffer to wake from DMA coalescing */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (IGB_TXPBSIZE - (2 * sc->max_frame_size)) >> 6);

		/* Make low power state decision controlled by DMA coalescing */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		reg &= ~E1000_PCIEMISC_LX_DECISION;
		E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
	} else if (hw->mac.type == e1000_82580) {
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg & ~E1000_PCIEMISC_LX_DECISION);
		E1000_WRITE_REG(hw, E1000_DMACR, 0);
	}
}
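
/*
 * Worked example for the thresholds above (assumed pba = 34, in KB
 * units, and max_frame_size = 1522; both values are illustrative):
 *
 *	hwm  = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081
 *	dmac = 34 - 1522 / 512     = 34 - 2    = 32
 *
 * i.e. the flow-control and coalescing high-water marks track the
 * packet buffer size minus roughly one frame's worth of space, with
 * the clamps keeping them within 6 (hwm) and 10 (dmac) units of pba.
 */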

static void
igb_reg_dump(struct igb_softc *sc)
{
	device_t dev = sc->dev;
	int col = 0;

#define DUMPREG(regno) do { \
	kprintf(" %13s=%08x", #regno + 6, E1000_READ_REG(&sc->hw, regno)); \
	if (++col == 3) { \
		kprintf("\n"); \
		col = 0; \
	} \
} while (0)

	device_printf(dev, "REGISTER DUMP\n");
	DUMPREG(E1000_CTRL);
	DUMPREG(E1000_STATUS);
	DUMPREG(E1000_EECD);
	DUMPREG(E1000_EERD);
	DUMPREG(E1000_CTRL_EXT);
	DUMPREG(E1000_FLA);
	DUMPREG(E1000_MDIC);
	DUMPREG(E1000_SCTL);
	DUMPREG(E1000_FCAL);
	DUMPREG(E1000_FCAH);
	DUMPREG(E1000_FCT);
	DUMPREG(E1000_CONNSW);
	DUMPREG(E1000_VET);
	DUMPREG(E1000_ICR);
	DUMPREG(E1000_ITR);
	DUMPREG(E1000_IMS);
	DUMPREG(E1000_IVAR);
	DUMPREG(E1000_SVCR);
	DUMPREG(E1000_SVT);
	DUMPREG(E1000_LPIC);
	DUMPREG(E1000_RCTL);
	DUMPREG(E1000_FCTTV);
	DUMPREG(E1000_TXCW);
	DUMPREG(E1000_RXCW);
	DUMPREG(E1000_EIMS);
	DUMPREG(E1000_EIAC);
	DUMPREG(E1000_EIAM);
	DUMPREG(E1000_GPIE);
	DUMPREG(E1000_IVAR0);
	DUMPREG(E1000_IVAR_MISC);
	DUMPREG(E1000_TCTL);
	DUMPREG(E1000_TCTL_EXT);
	DUMPREG(E1000_TIPG);
	DUMPREG(E1000_TBT);
	DUMPREG(E1000_AIT);
	DUMPREG(E1000_LEDCTL);
	DUMPREG(E1000_EXTCNF_CTRL);
	DUMPREG(E1000_EXTCNF_SIZE);
	DUMPREG(E1000_PHY_CTRL);
	DUMPREG(E1000_PBA);
	DUMPREG(E1000_PBS);
	DUMPREG(E1000_PBECCSTS);
	DUMPREG(E1000_EEMNGCTL);
	DUMPREG(E1000_EEARBC);
	DUMPREG(E1000_FLASHT);
	DUMPREG(E1000_EEARBC_I210);
	DUMPREG(E1000_EEWR);
	DUMPREG(E1000_FLSWCTL);
	DUMPREG(E1000_FLSWDATA);
	DUMPREG(E1000_FLSWCNT);
	DUMPREG(E1000_FLOP);
	DUMPREG(E1000_I2CCMD);
	DUMPREG(E1000_I2CPARAMS);
	DUMPREG(E1000_WDSTP);
	DUMPREG(E1000_SWDSTS);
	DUMPREG(E1000_FRTIMER);
	DUMPREG(E1000_TCPTIMER);
	DUMPREG(E1000_VPDDIAG);
	DUMPREG(E1000_IMS_V2);
	DUMPREG(E1000_IAM_V2);
	DUMPREG(E1000_ERT);
	DUMPREG(E1000_FCRTL);
	DUMPREG(E1000_FCRTH);
	DUMPREG(E1000_PSRCTL);
	DUMPREG(E1000_RDFH);
	DUMPREG(E1000_RDFT);
	DUMPREG(E1000_RDFHS);
	DUMPREG(E1000_RDFTS);
	DUMPREG(E1000_RDFPC);
	DUMPREG(E1000_PBRTH);
	DUMPREG(E1000_FCRTV);
	DUMPREG(E1000_RDPUMB);
	DUMPREG(E1000_RDPUAD);
	DUMPREG(E1000_RDPUWD);
	DUMPREG(E1000_RDPURD);
	DUMPREG(E1000_RDPUCTL);
	DUMPREG(E1000_PBDIAG);
	DUMPREG(E1000_RXPBS);
	DUMPREG(E1000_IRPBS);
	DUMPREG(E1000_PBRWAC);
	DUMPREG(E1000_RDTR);
	DUMPREG(E1000_RADV);
	DUMPREG(E1000_SRWR);
	DUMPREG(E1000_I210_FLMNGCTL);
	DUMPREG(E1000_I210_FLMNGDATA);
	DUMPREG(E1000_I210_FLMNGCNT);
	DUMPREG(E1000_I210_FLSWCTL);
	DUMPREG(E1000_I210_FLSWDATA);
	DUMPREG(E1000_I210_FLSWCNT);
	DUMPREG(E1000_I210_FLA);
	DUMPREG(E1000_INVM_SIZE);
	DUMPREG(E1000_I210_TQAVCTRL);
	DUMPREG(E1000_RSRPD);
	DUMPREG(E1000_RAID);
	DUMPREG(E1000_TXDMAC);
	DUMPREG(E1000_KABGTXD);
	DUMPREG(E1000_PBSLAC);
	DUMPREG(E1000_TXPBS);
	DUMPREG(E1000_ITPBS);
	DUMPREG(E1000_TDFH);
	DUMPREG(E1000_TDFT);
	DUMPREG(E1000_TDFHS);
	DUMPREG(E1000_TDFTS);
	DUMPREG(E1000_TDFPC);
	DUMPREG(E1000_TDPUMB);
	DUMPREG(E1000_TDPUAD);
	DUMPREG(E1000_TDPUWD);
	DUMPREG(E1000_TDPURD);
	DUMPREG(E1000_TDPUCTL);
	DUMPREG(E1000_DTXCTL);
	DUMPREG(E1000_DTXTCPFLGL);
	DUMPREG(E1000_DTXTCPFLGH);
	DUMPREG(E1000_DTXMXSZRQ);
	DUMPREG(E1000_TIDV);
	DUMPREG(E1000_TADV);
	DUMPREG(E1000_TSPMT);
	DUMPREG(E1000_VFGPRC);
	DUMPREG(E1000_VFGORC);
	DUMPREG(E1000_VFMPRC);
	DUMPREG(E1000_VFGPTC);
	DUMPREG(E1000_VFGOTC);
	DUMPREG(E1000_VFGOTLBC);
	DUMPREG(E1000_VFGPTLBC);
	DUMPREG(E1000_VFGORLBC);
	DUMPREG(E1000_VFGPRLBC);
	DUMPREG(E1000_LSECTXCAP);
	DUMPREG(E1000_LSECRXCAP);
	DUMPREG(E1000_LSECTXCTRL);
	DUMPREG(E1000_LSECRXCTRL);
	DUMPREG(E1000_LSECTXSCL);
	DUMPREG(E1000_LSECTXSCH);
	DUMPREG(E1000_LSECTXSA);
	DUMPREG(E1000_LSECTXPN0);
	DUMPREG(E1000_LSECTXPN1);
	DUMPREG(E1000_LSECRXSCL);
	DUMPREG(E1000_LSECRXSCH);
	DUMPREG(E1000_IPSCTRL);
	DUMPREG(E1000_IPSRXCMD);
	DUMPREG(E1000_IPSRXIDX);
	DUMPREG(E1000_IPSRXSALT);
	DUMPREG(E1000_IPSRXSPI);
	DUMPREG(E1000_IPSTXSALT);
	DUMPREG(E1000_IPSTXIDX);
	DUMPREG(E1000_PCS_CFG0);
	DUMPREG(E1000_PCS_LCTL);
	DUMPREG(E1000_PCS_LSTAT);
	DUMPREG(E1000_PCS_ANADV);
	DUMPREG(E1000_PCS_LPAB);
	DUMPREG(E1000_PCS_NPTX);
	DUMPREG(E1000_PCS_LPABNP);
	DUMPREG(E1000_RXCSUM);
	DUMPREG(E1000_RLPML);
	DUMPREG(E1000_RFCTL);
	DUMPREG(E1000_MTA);
	DUMPREG(E1000_RA);
	DUMPREG(E1000_RA2);
	DUMPREG(E1000_VFTA);
	DUMPREG(E1000_VT_CTL);
	DUMPREG(E1000_CIAA);
	DUMPREG(E1000_CIAD);
	DUMPREG(E1000_VFQA0);
	DUMPREG(E1000_VFQA1);
	DUMPREG(E1000_WUC);
	DUMPREG(E1000_WUFC);
	DUMPREG(E1000_WUS);
	DUMPREG(E1000_MANC);
	DUMPREG(E1000_IPAV);
	DUMPREG(E1000_IP4AT);
	DUMPREG(E1000_IP6AT);
	DUMPREG(E1000_WUPL);
	DUMPREG(E1000_WUPM);
	DUMPREG(E1000_PBACL);
	DUMPREG(E1000_FFLT);
	DUMPREG(E1000_HOST_IF);
	DUMPREG(E1000_HIBBA);
	DUMPREG(E1000_KMRNCTRLSTA);
	DUMPREG(E1000_MANC2H);
	DUMPREG(E1000_CCMCTL);
	DUMPREG(E1000_GIOCTL);
	DUMPREG(E1000_SCCTL);

#define E1000_WCS	0x558C
	DUMPREG(E1000_WCS);
#define E1000_GCR_EXT	0x586C
	DUMPREG(E1000_GCR_EXT);
	DUMPREG(E1000_GCR);
	DUMPREG(E1000_GCR2);
	DUMPREG(E1000_FACTPS);
	DUMPREG(E1000_DCA_ID);
	DUMPREG(E1000_DCA_CTRL);
	DUMPREG(E1000_UFUSE);
	DUMPREG(E1000_FFLT_DBG);
	DUMPREG(E1000_HICR);
	DUMPREG(E1000_FWSTS);
	DUMPREG(E1000_CPUVEC);
	DUMPREG(E1000_MRQC);
	DUMPREG(E1000_SWPBS);
	DUMPREG(E1000_MBVFICR);
	DUMPREG(E1000_MBVFIMR);
	DUMPREG(E1000_VFLRE);
	DUMPREG(E1000_VFRE);
	DUMPREG(E1000_VFTE);
	DUMPREG(E1000_QDE);
	DUMPREG(E1000_DTXSWC);
	DUMPREG(E1000_WVBR);
	DUMPREG(E1000_RPLOLR);
	DUMPREG(E1000_UTA);
	DUMPREG(E1000_IOVTCL);
	DUMPREG(E1000_VMRCTL);
	DUMPREG(E1000_VMRVLAN);
	DUMPREG(E1000_VMRVM);
	DUMPREG(E1000_LVMMC);
	DUMPREG(E1000_TXSWC);
	DUMPREG(E1000_SCCRL);
	DUMPREG(E1000_BSCTRH);
	DUMPREG(E1000_MSCTRH);
	DUMPREG(E1000_RXSTMPL);
	DUMPREG(E1000_RXSTMPH);
	DUMPREG(E1000_RXSATRL);
	DUMPREG(E1000_RXSATRH);
	DUMPREG(E1000_TXSTMPL);
	DUMPREG(E1000_TXSTMPH);
	DUMPREG(E1000_TIMINCA);
	DUMPREG(E1000_TIMADJL);
	DUMPREG(E1000_TIMADJH);
	DUMPREG(E1000_TSAUXC);
	DUMPREG(E1000_SYSSTMPL);
	DUMPREG(E1000_SYSSTMPH);
	DUMPREG(E1000_PLTSTMPL);
	DUMPREG(E1000_PLTSTMPH);
	DUMPREG(E1000_RXMTRL);
	DUMPREG(E1000_RXUDP);
	DUMPREG(E1000_SYSTIMR);
	DUMPREG(E1000_TSICR);
	DUMPREG(E1000_TSIM);
	DUMPREG(E1000_DMACR);
	DUMPREG(E1000_DMCTXTH);
	DUMPREG(E1000_DMCTLX);
	DUMPREG(E1000_DMCRTRH);
	DUMPREG(E1000_DMCCNT);
	DUMPREG(E1000_FCRTC);
	DUMPREG(E1000_PCIEMISC);
	DUMPREG(E1000_PCIEERRSTS);
	DUMPREG(E1000_IPCNFG);
	DUMPREG(E1000_LTRC);
	DUMPREG(E1000_EEER);
	DUMPREG(E1000_EEE_SU);
	DUMPREG(E1000_TLPIC);
	DUMPREG(E1000_RLPIC);
	if (++col != 1)
		kprintf("\n");
	kprintf("\n");
}
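
/*
 * The dump above is driven by the sysctl handler that follows: writing
 * any positive integer to the node registered for igb_sysctl_reg_dump
 * (the node itself is created where the driver builds its sysctl tree)
 * prints every register at once, three per line courtesy of DUMPREG's
 * column counter.  The "#regno + 6" in DUMPREG simply skips the
 * "E1000_" prefix when printing each register's name.
 */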

static int
igb_sysctl_reg_dump(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, dump = 0;

	error = sysctl_handle_int(oidp, &dump, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (dump <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	igb_reg_dump(sc);
	ifnet_deserialize_all(ifp);

	return error;
}