1 /* 2 * Copyright (c) 2001-2013, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include "opt_ifpoll.h" 33 #include "opt_igb.h" 34 35 #include <sys/param.h> 36 #include <sys/bus.h> 37 #include <sys/endian.h> 38 #include <sys/interrupt.h> 39 #include <sys/kernel.h> 40 #include <sys/malloc.h> 41 #include <sys/mbuf.h> 42 #include <sys/proc.h> 43 #include <sys/rman.h> 44 #include <sys/serialize.h> 45 #include <sys/serialize2.h> 46 #include <sys/socket.h> 47 #include <sys/sockio.h> 48 #include <sys/sysctl.h> 49 #include <sys/systm.h> 50 51 #include <net/bpf.h> 52 #include <net/ethernet.h> 53 #include <net/if.h> 54 #include <net/if_arp.h> 55 #include <net/if_dl.h> 56 #include <net/if_media.h> 57 #include <net/ifq_var.h> 58 #include <net/if_ringmap.h> 59 #include <net/toeplitz.h> 60 #include <net/toeplitz2.h> 61 #include <net/vlan/if_vlan_var.h> 62 #include <net/vlan/if_vlan_ether.h> 63 #include <net/if_poll.h> 64 65 #include <netinet/in_systm.h> 66 #include <netinet/in.h> 67 #include <netinet/ip.h> 68 69 #include <bus/pci/pcivar.h> 70 #include <bus/pci/pcireg.h> 71 72 #include <dev/netif/ig_hal/e1000_api.h> 73 #include <dev/netif/ig_hal/e1000_82575.h> 74 #include <dev/netif/ig_hal/e1000_dragonfly.h> 75 #include <dev/netif/igb/if_igb.h> 76 77 #ifdef IGB_RSS_DEBUG 78 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \ 79 do { \ 80 if (sc->rss_debug >= lvl) \ 81 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 82 } while (0) 83 #else /* !IGB_RSS_DEBUG */ 84 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 85 #endif /* IGB_RSS_DEBUG */ 86 87 #define IGB_NAME "Intel(R) PRO/1000 " 88 #define IGB_DEVICE(id) \ 89 { IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id } 90 #define IGB_DEVICE_NULL { 0, 0, NULL } 91 92 static struct igb_device { 93 uint16_t vid; 94 uint16_t did; 95 const char *desc; 96 } igb_devices[] = { 97 IGB_DEVICE(82575EB_COPPER), 98 IGB_DEVICE(82575EB_FIBER_SERDES), 99 IGB_DEVICE(82575GB_QUAD_COPPER), 100 IGB_DEVICE(82576), 101 IGB_DEVICE(82576_NS), 102 IGB_DEVICE(82576_NS_SERDES), 103 IGB_DEVICE(82576_FIBER), 104 IGB_DEVICE(82576_SERDES), 105 IGB_DEVICE(82576_SERDES_QUAD), 106 IGB_DEVICE(82576_QUAD_COPPER), 107 IGB_DEVICE(82576_QUAD_COPPER_ET2), 108 IGB_DEVICE(82576_VF), 109 IGB_DEVICE(82580_COPPER), 110 IGB_DEVICE(82580_FIBER), 111 IGB_DEVICE(82580_SERDES), 112 IGB_DEVICE(82580_SGMII), 113 IGB_DEVICE(82580_COPPER_DUAL), 114 IGB_DEVICE(82580_QUAD_FIBER), 115 IGB_DEVICE(DH89XXCC_SERDES), 116 IGB_DEVICE(DH89XXCC_SGMII), 117 IGB_DEVICE(DH89XXCC_SFP), 118 IGB_DEVICE(DH89XXCC_BACKPLANE), 119 IGB_DEVICE(I350_COPPER), 120 IGB_DEVICE(I350_FIBER), 121 IGB_DEVICE(I350_SERDES), 122 IGB_DEVICE(I350_SGMII), 123 IGB_DEVICE(I350_VF), 124 IGB_DEVICE(I210_COPPER), 125 IGB_DEVICE(I210_COPPER_IT), 126 IGB_DEVICE(I210_COPPER_OEM1), 127 IGB_DEVICE(I210_COPPER_FLASHLESS), 128 IGB_DEVICE(I210_SERDES_FLASHLESS), 129 IGB_DEVICE(I210_FIBER), 130 IGB_DEVICE(I210_SERDES), 131 IGB_DEVICE(I210_SGMII), 132 IGB_DEVICE(I211_COPPER), 133 IGB_DEVICE(I354_BACKPLANE_1GBPS), 134 IGB_DEVICE(I354_BACKPLANE_2_5GBPS), 135 IGB_DEVICE(I354_SGMII), 136 137 /* required last entry */ 138 IGB_DEVICE_NULL 139 }; 140 141 static int igb_probe(device_t); 142 static int igb_attach(device_t); 143 static int igb_detach(device_t); 144 static int igb_shutdown(device_t); 145 static int igb_suspend(device_t); 146 static int igb_resume(device_t); 147 148 static boolean_t igb_is_valid_ether_addr(const uint8_t *); 149 static void igb_setup_ifp(struct igb_softc *); 150 static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *); 151 static int igb_tso_pullup(struct igb_tx_ring *, struct mbuf **); 152 static void igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *); 153 static void igb_add_sysctl(struct igb_softc *); 154 static void igb_add_intr_rate_sysctl(struct igb_softc *, int, 155 const char *, const char *); 156 static int igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS); 157 static int igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS); 158 static int igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS); 159 static int igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS); 160 static void igb_set_ring_inuse(struct igb_softc *, boolean_t); 161 static int igb_get_rxring_inuse(const struct igb_softc *, boolean_t); 162 static int igb_get_txring_inuse(const struct igb_softc *, boolean_t); 163 static void igb_set_timer_cpuid(struct igb_softc *, boolean_t); 164 165 static void igb_vf_init_stats(struct igb_softc *); 166 static void igb_reset(struct igb_softc *, boolean_t); 167 static void igb_update_stats_counters(struct igb_softc *); 168 static void igb_update_vf_stats_counters(struct igb_softc *); 169 static void igb_update_link_status(struct igb_softc *); 170 static void igb_init_tx_unit(struct igb_softc *); 171 static void igb_init_rx_unit(struct igb_softc *, boolean_t); 172 static void igb_init_dmac(struct igb_softc *, uint32_t); 173 174 static void igb_set_vlan(struct igb_softc *); 175 static void igb_set_multi(struct igb_softc *); 176 static void igb_set_promisc(struct igb_softc *); 177 static void igb_disable_promisc(struct igb_softc *); 178 179 static int igb_get_ring_max(const struct igb_softc *); 180 static void igb_get_rxring_cnt(const struct igb_softc *, int *, int *); 181 static void igb_get_txring_cnt(const struct igb_softc *, int *, int *); 182 static int igb_alloc_rings(struct igb_softc *); 183 static void igb_free_rings(struct igb_softc *); 184 static int igb_create_tx_ring(struct igb_tx_ring *); 185 static int igb_create_rx_ring(struct igb_rx_ring *); 186 static void igb_free_tx_ring(struct igb_tx_ring *); 187 static void igb_free_rx_ring(struct igb_rx_ring *); 188 static void igb_destroy_tx_ring(struct igb_tx_ring *, int); 189 static void igb_destroy_rx_ring(struct igb_rx_ring *, int); 190 static void igb_init_tx_ring(struct igb_tx_ring *); 191 static int igb_init_rx_ring(struct igb_rx_ring *); 192 static int igb_newbuf(struct igb_rx_ring *, int, boolean_t); 193 static int igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *); 194 static void igb_rx_refresh(struct igb_rx_ring *, int); 195 static void igb_setup_serialize(struct igb_softc *); 196 197 static void igb_stop(struct igb_softc *); 198 static void igb_init(void *); 199 static int igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 200 static void igb_media_status(struct ifnet *, struct ifmediareq *); 201 static int igb_media_change(struct ifnet *); 202 static void igb_timer(void *); 203 static void igb_watchdog(struct ifaltq_subque *); 204 static void igb_start(struct ifnet *, struct ifaltq_subque *); 205 #ifdef IFPOLL_ENABLE 206 static void igb_npoll(struct ifnet *, struct ifpoll_info *); 207 static void igb_npoll_rx(struct ifnet *, void *, int); 208 static void igb_npoll_tx(struct ifnet *, void *, int); 209 static void igb_npoll_status(struct ifnet *); 210 #endif 211 static void igb_serialize(struct ifnet *, enum ifnet_serialize); 212 static void igb_deserialize(struct ifnet *, enum ifnet_serialize); 213 static int igb_tryserialize(struct ifnet *, enum ifnet_serialize); 214 #ifdef INVARIANTS 215 static void igb_serialize_assert(struct ifnet *, enum ifnet_serialize, 216 boolean_t); 217 #endif 218 219 static void igb_intr(void *); 220 static void igb_intr_shared(void *); 221 static void igb_rxeof(struct igb_rx_ring *, int); 222 static void igb_txeof(struct igb_tx_ring *, int); 223 static void igb_txgc(struct igb_tx_ring *); 224 static void igb_txgc_timer(void *); 225 static void igb_set_eitr(struct igb_softc *, int, int); 226 static void igb_enable_intr(struct igb_softc *); 227 static void igb_disable_intr(struct igb_softc *); 228 static void igb_init_unshared_intr(struct igb_softc *); 229 static void igb_init_intr(struct igb_softc *); 230 static int igb_setup_intr(struct igb_softc *); 231 static void igb_set_txintr_mask(struct igb_tx_ring *, int *, int); 232 static void igb_set_rxintr_mask(struct igb_rx_ring *, int *, int); 233 static void igb_set_intr_mask(struct igb_softc *); 234 static int igb_alloc_intr(struct igb_softc *); 235 static void igb_free_intr(struct igb_softc *); 236 static void igb_teardown_intr(struct igb_softc *, int); 237 static void igb_alloc_msix(struct igb_softc *); 238 static void igb_free_msix(struct igb_softc *, boolean_t); 239 static void igb_msix_rx(void *); 240 static void igb_msix_tx(void *); 241 static void igb_msix_status(void *); 242 static void igb_msix_rxtx(void *); 243 244 /* Management and WOL Support */ 245 static void igb_get_mgmt(struct igb_softc *); 246 static void igb_rel_mgmt(struct igb_softc *); 247 static void igb_get_hw_control(struct igb_softc *); 248 static void igb_rel_hw_control(struct igb_softc *); 249 static void igb_enable_wol(device_t); 250 251 static device_method_t igb_methods[] = { 252 /* Device interface */ 253 DEVMETHOD(device_probe, igb_probe), 254 DEVMETHOD(device_attach, igb_attach), 255 DEVMETHOD(device_detach, igb_detach), 256 DEVMETHOD(device_shutdown, igb_shutdown), 257 DEVMETHOD(device_suspend, igb_suspend), 258 DEVMETHOD(device_resume, igb_resume), 259 DEVMETHOD_END 260 }; 261 262 static driver_t igb_driver = { 263 "igb", 264 igb_methods, 265 sizeof(struct igb_softc), 266 }; 267 268 static devclass_t igb_devclass; 269 270 DECLARE_DUMMY_MODULE(if_igb); 271 MODULE_DEPEND(igb, ig_hal, 1, 1, 1); 272 DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL); 273 274 static int igb_rxd = IGB_DEFAULT_RXD; 275 static int igb_txd = IGB_DEFAULT_TXD; 276 static int igb_rxr = 0; 277 static int igb_txr = 0; 278 static int igb_msi_enable = 1; 279 static int igb_msix_enable = 1; 280 static int igb_eee_disabled = 1; /* Energy Efficient Ethernet */ 281 282 static char igb_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE; 283 284 /* 285 * DMA Coalescing, only for i350 - default to off, 286 * this feature is for power savings 287 */ 288 static int igb_dma_coalesce = 0; 289 290 TUNABLE_INT("hw.igb.rxd", &igb_rxd); 291 TUNABLE_INT("hw.igb.txd", &igb_txd); 292 TUNABLE_INT("hw.igb.rxr", &igb_rxr); 293 TUNABLE_INT("hw.igb.txr", &igb_txr); 294 TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable); 295 TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable); 296 TUNABLE_STR("hw.igb.flow_ctrl", igb_flowctrl, sizeof(igb_flowctrl)); 297 298 /* i350 specific */ 299 TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled); 300 TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce); 301 302 static __inline void 303 igb_tx_intr(struct igb_tx_ring *txr, int hdr) 304 { 305 306 igb_txeof(txr, hdr); 307 if (!ifsq_is_empty(txr->ifsq)) 308 ifsq_devstart(txr->ifsq); 309 } 310 311 static __inline void 312 igb_try_txgc(struct igb_tx_ring *txr, int16_t dec) 313 { 314 315 if (txr->tx_running > 0) { 316 txr->tx_running -= dec; 317 if (txr->tx_running <= 0 && txr->tx_nmbuf && 318 txr->tx_avail < txr->num_tx_desc && 319 txr->tx_avail + txr->intr_nsegs > txr->num_tx_desc) 320 igb_txgc(txr); 321 } 322 } 323 324 static void 325 igb_txgc_timer(void *xtxr) 326 { 327 struct igb_tx_ring *txr = xtxr; 328 struct ifnet *ifp = &txr->sc->arpcom.ac_if; 329 330 if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) != 331 (IFF_RUNNING | IFF_UP)) 332 return; 333 334 if (!lwkt_serialize_try(&txr->tx_serialize)) 335 goto done; 336 337 if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) != 338 (IFF_RUNNING | IFF_UP)) { 339 lwkt_serialize_exit(&txr->tx_serialize); 340 return; 341 } 342 igb_try_txgc(txr, IGB_TX_RUNNING_DEC); 343 344 lwkt_serialize_exit(&txr->tx_serialize); 345 done: 346 callout_reset(&txr->tx_gc_timer, 1, igb_txgc_timer, txr); 347 } 348 349 static __inline void 350 igb_free_txbuf(struct igb_tx_ring *txr, struct igb_tx_buf *txbuf) 351 { 352 353 KKASSERT(txbuf->m_head != NULL); 354 KKASSERT(txr->tx_nmbuf > 0); 355 txr->tx_nmbuf--; 356 357 bus_dmamap_unload(txr->tx_tag, txbuf->map); 358 m_freem(txbuf->m_head); 359 txbuf->m_head = NULL; 360 } 361 362 static __inline void 363 igb_rxcsum(uint32_t staterr, struct mbuf *mp) 364 { 365 /* Ignore Checksum bit is set */ 366 if (staterr & E1000_RXD_STAT_IXSM) 367 return; 368 369 if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == 370 E1000_RXD_STAT_IPCS) 371 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; 372 373 if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) { 374 if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) { 375 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 376 CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED; 377 mp->m_pkthdr.csum_data = htons(0xffff); 378 } 379 } 380 } 381 382 static __inline struct pktinfo * 383 igb_rssinfo(struct mbuf *m, struct pktinfo *pi, 384 uint32_t hash, uint32_t hashtype, uint32_t staterr) 385 { 386 switch (hashtype) { 387 case E1000_RXDADV_RSSTYPE_IPV4_TCP: 388 pi->pi_netisr = NETISR_IP; 389 pi->pi_flags = 0; 390 pi->pi_l3proto = IPPROTO_TCP; 391 break; 392 393 case E1000_RXDADV_RSSTYPE_IPV4: 394 if (staterr & E1000_RXD_STAT_IXSM) 395 return NULL; 396 397 if ((staterr & 398 (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 399 E1000_RXD_STAT_TCPCS) { 400 pi->pi_netisr = NETISR_IP; 401 pi->pi_flags = 0; 402 pi->pi_l3proto = IPPROTO_UDP; 403 break; 404 } 405 /* FALL THROUGH */ 406 default: 407 return NULL; 408 } 409 410 m_sethash(m, toeplitz_hash(hash)); 411 return pi; 412 } 413 414 static int 415 igb_get_ring_max(const struct igb_softc *sc) 416 { 417 418 switch (sc->hw.mac.type) { 419 case e1000_82575: 420 return (IGB_MAX_RING_82575); 421 422 case e1000_82576: 423 return (IGB_MAX_RING_82576); 424 425 case e1000_82580: 426 return (IGB_MAX_RING_82580); 427 428 case e1000_i350: 429 return (IGB_MAX_RING_I350); 430 431 case e1000_i354: 432 return (IGB_MAX_RING_I354); 433 434 case e1000_i210: 435 return (IGB_MAX_RING_I210); 436 437 case e1000_i211: 438 return (IGB_MAX_RING_I211); 439 440 default: 441 return (IGB_MIN_RING); 442 } 443 } 444 445 static void 446 igb_get_rxring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max) 447 { 448 449 *ring_max = igb_get_ring_max(sc); 450 *ring_cnt = device_getenv_int(sc->dev, "rxr", igb_rxr); 451 } 452 453 static void 454 igb_get_txring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max) 455 { 456 457 *ring_max = igb_get_ring_max(sc); 458 *ring_cnt = device_getenv_int(sc->dev, "txr", igb_txr); 459 } 460 461 static int 462 igb_probe(device_t dev) 463 { 464 const struct igb_device *d; 465 uint16_t vid, did; 466 467 vid = pci_get_vendor(dev); 468 did = pci_get_device(dev); 469 470 for (d = igb_devices; d->desc != NULL; ++d) { 471 if (vid == d->vid && did == d->did) { 472 device_set_desc(dev, d->desc); 473 return 0; 474 } 475 } 476 return ENXIO; 477 } 478 479 static int 480 igb_attach(device_t dev) 481 { 482 struct igb_softc *sc = device_get_softc(dev); 483 uint16_t eeprom_data; 484 int error = 0, ring_max, ring_cnt; 485 char flowctrl[IFM_ETH_FC_STRLEN]; 486 487 #ifdef notyet 488 /* SYSCTL stuff */ 489 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 490 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 491 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, 492 igb_sysctl_nvm_info, "I", "NVM Information"); 493 #endif 494 495 ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK, 496 igb_media_change, igb_media_status); 497 callout_init_mp(&sc->timer); 498 lwkt_serialize_init(&sc->main_serialize); 499 500 if_initname(&sc->arpcom.ac_if, device_get_name(dev), 501 device_get_unit(dev)); 502 sc->dev = sc->osdep.dev = dev; 503 504 /* Enable bus mastering */ 505 pci_enable_busmaster(dev); 506 507 /* 508 * Determine hardware and mac type 509 */ 510 sc->hw.vendor_id = pci_get_vendor(dev); 511 sc->hw.device_id = pci_get_device(dev); 512 sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 513 sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); 514 sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); 515 516 if (e1000_set_mac_type(&sc->hw)) 517 return ENXIO; 518 519 /* Are we a VF device? */ 520 if (sc->hw.mac.type == e1000_vfadapt || 521 sc->hw.mac.type == e1000_vfadapt_i350) 522 sc->vf_ifp = 1; 523 else 524 sc->vf_ifp = 0; 525 526 /* 527 * Configure total supported RX/TX ring count 528 */ 529 igb_get_rxring_cnt(sc, &ring_cnt, &ring_max); 530 sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max); 531 igb_get_txring_cnt(sc, &ring_cnt, &ring_max); 532 sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max); 533 if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap); 534 535 sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap); 536 sc->rx_ring_inuse = sc->rx_ring_cnt; 537 sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap); 538 sc->tx_ring_inuse = sc->tx_ring_cnt; 539 540 /* Setup flow control. */ 541 device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl), 542 igb_flowctrl); 543 sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl); 544 545 /* 546 * Allocate IO memory 547 */ 548 sc->mem_rid = PCIR_BAR(0); 549 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 550 RF_ACTIVE); 551 if (sc->mem_res == NULL) { 552 device_printf(dev, "Unable to allocate bus resource: memory\n"); 553 error = ENXIO; 554 goto failed; 555 } 556 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res); 557 sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res); 558 559 sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle; 560 561 /* Save PCI command register for Shared Code */ 562 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 563 sc->hw.back = &sc->osdep; 564 565 /* Do Shared Code initialization */ 566 if (e1000_setup_init_funcs(&sc->hw, TRUE)) { 567 device_printf(dev, "Setup of Shared code failed\n"); 568 error = ENXIO; 569 goto failed; 570 } 571 572 e1000_get_bus_info(&sc->hw); 573 574 sc->hw.mac.autoneg = DO_AUTO_NEG; 575 sc->hw.phy.autoneg_wait_to_complete = FALSE; 576 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 577 578 /* Copper options */ 579 if (sc->hw.phy.media_type == e1000_media_type_copper) { 580 sc->hw.phy.mdix = AUTO_ALL_MODES; 581 sc->hw.phy.disable_polarity_correction = FALSE; 582 sc->hw.phy.ms_type = IGB_MASTER_SLAVE; 583 } 584 585 /* Set the frame limits assuming standard ethernet sized frames. */ 586 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 587 588 /* Allocate RX/TX rings */ 589 error = igb_alloc_rings(sc); 590 if (error) 591 goto failed; 592 593 /* Allocate interrupt */ 594 error = igb_alloc_intr(sc); 595 if (error) 596 goto failed; 597 598 /* Setup serializes */ 599 igb_setup_serialize(sc); 600 601 /* Allocate the appropriate stats memory */ 602 if (sc->vf_ifp) { 603 sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF, 604 M_WAITOK | M_ZERO); 605 igb_vf_init_stats(sc); 606 } else { 607 sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF, 608 M_WAITOK | M_ZERO); 609 } 610 611 /* Allocate multicast array memory. */ 612 sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, 613 M_DEVBUF, M_WAITOK); 614 615 /* Some adapter-specific advanced features */ 616 if (sc->hw.mac.type >= e1000_i350) { 617 #ifdef notyet 618 igb_set_sysctl_value(adapter, "dma_coalesce", 619 "configure dma coalesce", 620 &adapter->dma_coalesce, igb_dma_coalesce); 621 igb_set_sysctl_value(adapter, "eee_disabled", 622 "enable Energy Efficient Ethernet", 623 &adapter->hw.dev_spec._82575.eee_disable, 624 igb_eee_disabled); 625 #else 626 sc->dma_coalesce = igb_dma_coalesce; 627 sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled; 628 #endif 629 if (sc->hw.phy.media_type == e1000_media_type_copper) { 630 if (sc->hw.mac.type == e1000_i354) 631 e1000_set_eee_i354(&sc->hw, TRUE, TRUE); 632 else 633 e1000_set_eee_i350(&sc->hw, TRUE, TRUE); 634 } 635 } 636 637 /* 638 * Start from a known state, this is important in reading the nvm and 639 * mac from that. 640 */ 641 e1000_reset_hw(&sc->hw); 642 643 /* Make sure we have a good EEPROM before we read from it */ 644 if (sc->hw.mac.type != e1000_i210 && sc->hw.mac.type != e1000_i211 && 645 e1000_validate_nvm_checksum(&sc->hw) < 0) { 646 /* 647 * Some PCI-E parts fail the first check due to 648 * the link being in sleep state, call it again, 649 * if it fails a second time its a real issue. 650 */ 651 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 652 device_printf(dev, 653 "The EEPROM Checksum Is Not Valid\n"); 654 error = EIO; 655 goto failed; 656 } 657 } 658 659 /* Copy the permanent MAC address out of the EEPROM */ 660 if (e1000_read_mac_addr(&sc->hw) < 0) { 661 device_printf(dev, "EEPROM read error while reading MAC" 662 " address\n"); 663 error = EIO; 664 goto failed; 665 } 666 if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) { 667 device_printf(dev, "Invalid MAC address\n"); 668 error = EIO; 669 goto failed; 670 } 671 672 /* Setup OS specific network interface */ 673 igb_setup_ifp(sc); 674 675 /* Add sysctl tree, must after igb_setup_ifp() */ 676 igb_add_sysctl(sc); 677 678 /* Now get a good starting state */ 679 igb_reset(sc, FALSE); 680 681 /* Initialize statistics */ 682 igb_update_stats_counters(sc); 683 684 sc->hw.mac.get_link_status = 1; 685 igb_update_link_status(sc); 686 687 /* Indicate SOL/IDER usage */ 688 if (e1000_check_reset_block(&sc->hw)) { 689 device_printf(dev, 690 "PHY reset is blocked due to SOL/IDER session.\n"); 691 } 692 693 /* Determine if we have to control management hardware */ 694 if (e1000_enable_mng_pass_thru(&sc->hw)) 695 sc->flags |= IGB_FLAG_HAS_MGMT; 696 697 /* 698 * Setup Wake-on-Lan 699 */ 700 /* APME bit in EEPROM is mapped to WUC.APME */ 701 eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME; 702 if (eeprom_data) 703 sc->wol = E1000_WUFC_MAG; 704 /* XXX disable WOL */ 705 sc->wol = 0; 706 707 #ifdef notyet 708 /* Register for VLAN events */ 709 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 710 igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); 711 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 712 igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 713 #endif 714 715 #ifdef notyet 716 igb_add_hw_stats(adapter); 717 #endif 718 719 /* 720 * Disable interrupt to prevent spurious interrupts (line based 721 * interrupt, MSI or even MSI-X), which had been observed on 722 * several types of LOMs, from being handled. 723 */ 724 igb_disable_intr(sc); 725 726 error = igb_setup_intr(sc); 727 if (error) { 728 ether_ifdetach(&sc->arpcom.ac_if); 729 goto failed; 730 } 731 return 0; 732 733 failed: 734 igb_detach(dev); 735 return error; 736 } 737 738 static int 739 igb_detach(device_t dev) 740 { 741 struct igb_softc *sc = device_get_softc(dev); 742 743 if (device_is_attached(dev)) { 744 struct ifnet *ifp = &sc->arpcom.ac_if; 745 746 ifnet_serialize_all(ifp); 747 748 igb_stop(sc); 749 750 e1000_phy_hw_reset(&sc->hw); 751 752 /* Give control back to firmware */ 753 igb_rel_mgmt(sc); 754 igb_rel_hw_control(sc); 755 756 if (sc->wol) { 757 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 758 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 759 igb_enable_wol(dev); 760 } 761 762 igb_teardown_intr(sc, sc->intr_cnt); 763 764 ifnet_deserialize_all(ifp); 765 766 ether_ifdetach(ifp); 767 } else if (sc->mem_res != NULL) { 768 igb_rel_hw_control(sc); 769 } 770 771 ifmedia_removeall(&sc->media); 772 bus_generic_detach(dev); 773 774 igb_free_intr(sc); 775 776 if (sc->msix_mem_res != NULL) { 777 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid, 778 sc->msix_mem_res); 779 } 780 if (sc->mem_res != NULL) { 781 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, 782 sc->mem_res); 783 } 784 785 igb_free_rings(sc); 786 787 if (sc->mta != NULL) 788 kfree(sc->mta, M_DEVBUF); 789 if (sc->stats != NULL) 790 kfree(sc->stats, M_DEVBUF); 791 if (sc->serializes != NULL) 792 kfree(sc->serializes, M_DEVBUF); 793 if (sc->rx_rmap != NULL) 794 if_ringmap_free(sc->rx_rmap); 795 if (sc->rx_rmap_intr != NULL) 796 if_ringmap_free(sc->rx_rmap_intr); 797 if (sc->tx_rmap != NULL) 798 if_ringmap_free(sc->tx_rmap); 799 if (sc->tx_rmap_intr != NULL) 800 if_ringmap_free(sc->tx_rmap_intr); 801 802 return 0; 803 } 804 805 static int 806 igb_shutdown(device_t dev) 807 { 808 return igb_suspend(dev); 809 } 810 811 static int 812 igb_suspend(device_t dev) 813 { 814 struct igb_softc *sc = device_get_softc(dev); 815 struct ifnet *ifp = &sc->arpcom.ac_if; 816 817 ifnet_serialize_all(ifp); 818 819 igb_stop(sc); 820 821 igb_rel_mgmt(sc); 822 igb_rel_hw_control(sc); 823 824 if (sc->wol) { 825 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 826 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 827 igb_enable_wol(dev); 828 } 829 830 ifnet_deserialize_all(ifp); 831 832 return bus_generic_suspend(dev); 833 } 834 835 static int 836 igb_resume(device_t dev) 837 { 838 struct igb_softc *sc = device_get_softc(dev); 839 struct ifnet *ifp = &sc->arpcom.ac_if; 840 int i; 841 842 ifnet_serialize_all(ifp); 843 844 igb_init(sc); 845 igb_get_mgmt(sc); 846 847 for (i = 0; i < sc->tx_ring_inuse; ++i) 848 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 849 850 ifnet_deserialize_all(ifp); 851 852 return bus_generic_resume(dev); 853 } 854 855 static int 856 igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 857 { 858 struct igb_softc *sc = ifp->if_softc; 859 struct ifreq *ifr = (struct ifreq *)data; 860 int max_frame_size, mask, reinit; 861 int error = 0; 862 863 ASSERT_IFNET_SERIALIZED_ALL(ifp); 864 865 switch (command) { 866 case SIOCSIFMTU: 867 max_frame_size = 9234; 868 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 869 ETHER_CRC_LEN) { 870 error = EINVAL; 871 break; 872 } 873 874 ifp->if_mtu = ifr->ifr_mtu; 875 sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + 876 ETHER_CRC_LEN; 877 878 if (ifp->if_flags & IFF_RUNNING) 879 igb_init(sc); 880 break; 881 882 case SIOCSIFFLAGS: 883 if (ifp->if_flags & IFF_UP) { 884 if (ifp->if_flags & IFF_RUNNING) { 885 if ((ifp->if_flags ^ sc->if_flags) & 886 (IFF_PROMISC | IFF_ALLMULTI)) { 887 igb_disable_promisc(sc); 888 igb_set_promisc(sc); 889 } 890 } else { 891 igb_init(sc); 892 } 893 } else if (ifp->if_flags & IFF_RUNNING) { 894 igb_stop(sc); 895 } 896 sc->if_flags = ifp->if_flags; 897 break; 898 899 case SIOCADDMULTI: 900 case SIOCDELMULTI: 901 if (ifp->if_flags & IFF_RUNNING) { 902 igb_disable_intr(sc); 903 igb_set_multi(sc); 904 #ifdef IFPOLL_ENABLE 905 if (!(ifp->if_flags & IFF_NPOLLING)) 906 #endif 907 igb_enable_intr(sc); 908 } 909 break; 910 911 case SIOCSIFMEDIA: 912 /* Check SOL/IDER usage */ 913 if (e1000_check_reset_block(&sc->hw)) { 914 if_printf(ifp, "Media change is " 915 "blocked due to SOL/IDER session.\n"); 916 break; 917 } 918 /* FALL THROUGH */ 919 920 case SIOCGIFMEDIA: 921 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 922 break; 923 924 case SIOCSIFCAP: 925 reinit = 0; 926 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 927 if (mask & IFCAP_RXCSUM) { 928 ifp->if_capenable ^= IFCAP_RXCSUM; 929 reinit = 1; 930 } 931 if (mask & IFCAP_VLAN_HWTAGGING) { 932 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 933 reinit = 1; 934 } 935 if (mask & IFCAP_TXCSUM) { 936 ifp->if_capenable ^= IFCAP_TXCSUM; 937 if (ifp->if_capenable & IFCAP_TXCSUM) 938 ifp->if_hwassist |= IGB_CSUM_FEATURES; 939 else 940 ifp->if_hwassist &= ~IGB_CSUM_FEATURES; 941 } 942 if (mask & IFCAP_TSO) { 943 ifp->if_capenable ^= IFCAP_TSO; 944 if (ifp->if_capenable & IFCAP_TSO) 945 ifp->if_hwassist |= CSUM_TSO; 946 else 947 ifp->if_hwassist &= ~CSUM_TSO; 948 } 949 if (mask & IFCAP_RSS) 950 ifp->if_capenable ^= IFCAP_RSS; 951 if (reinit && (ifp->if_flags & IFF_RUNNING)) 952 igb_init(sc); 953 break; 954 955 default: 956 error = ether_ioctl(ifp, command, data); 957 break; 958 } 959 return error; 960 } 961 962 static void 963 igb_init(void *xsc) 964 { 965 struct igb_softc *sc = xsc; 966 struct ifnet *ifp = &sc->arpcom.ac_if; 967 boolean_t polling; 968 int i; 969 970 ASSERT_IFNET_SERIALIZED_ALL(ifp); 971 972 igb_stop(sc); 973 974 /* Get the latest mac address, User can use a LAA */ 975 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN); 976 977 /* Put the address into the Receive Address Array */ 978 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 979 980 igb_reset(sc, FALSE); 981 igb_update_link_status(sc); 982 983 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 984 985 /* Clear bad data from Rx FIFOs */ 986 e1000_rx_fifo_flush_82575(&sc->hw); 987 988 /* Configure for OS presence */ 989 igb_get_mgmt(sc); 990 991 polling = FALSE; 992 #ifdef IFPOLL_ENABLE 993 if (ifp->if_flags & IFF_NPOLLING) 994 polling = TRUE; 995 #endif 996 997 /* Configured used RX/TX rings */ 998 igb_set_ring_inuse(sc, polling); 999 ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse); 1000 1001 /* Initialize interrupt */ 1002 igb_init_intr(sc); 1003 1004 /* Prepare transmit descriptors and buffers */ 1005 for (i = 0; i < sc->tx_ring_inuse; ++i) 1006 igb_init_tx_ring(&sc->tx_rings[i]); 1007 igb_init_tx_unit(sc); 1008 1009 /* Setup Multicast table */ 1010 igb_set_multi(sc); 1011 1012 #if 0 1013 /* 1014 * Figure out the desired mbuf pool 1015 * for doing jumbo/packetsplit 1016 */ 1017 if (adapter->max_frame_size <= 2048) 1018 adapter->rx_mbuf_sz = MCLBYTES; 1019 else if (adapter->max_frame_size <= 4096) 1020 adapter->rx_mbuf_sz = MJUMPAGESIZE; 1021 else 1022 adapter->rx_mbuf_sz = MJUM9BYTES; 1023 #endif 1024 1025 /* Prepare receive descriptors and buffers */ 1026 for (i = 0; i < sc->rx_ring_inuse; ++i) { 1027 int error; 1028 1029 error = igb_init_rx_ring(&sc->rx_rings[i]); 1030 if (error) { 1031 if_printf(ifp, "Could not setup receive structures\n"); 1032 igb_stop(sc); 1033 return; 1034 } 1035 } 1036 igb_init_rx_unit(sc, polling); 1037 1038 /* Enable VLAN support */ 1039 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 1040 igb_set_vlan(sc); 1041 1042 /* Don't lose promiscuous settings */ 1043 igb_set_promisc(sc); 1044 1045 /* Clear counters */ 1046 e1000_clear_hw_cntrs_base_generic(&sc->hw); 1047 1048 /* This clears any pending interrupts */ 1049 E1000_READ_REG(&sc->hw, E1000_ICR); 1050 1051 /* 1052 * Only enable interrupts if we are not polling, make sure 1053 * they are off otherwise. 1054 */ 1055 if (polling) { 1056 igb_disable_intr(sc); 1057 } else { 1058 igb_enable_intr(sc); 1059 E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC); 1060 } 1061 1062 /* Set Energy Efficient Ethernet */ 1063 if (sc->hw.phy.media_type == e1000_media_type_copper) { 1064 if (sc->hw.mac.type == e1000_i354) 1065 e1000_set_eee_i354(&sc->hw, TRUE, TRUE); 1066 else 1067 e1000_set_eee_i350(&sc->hw, TRUE, TRUE); 1068 } 1069 1070 ifp->if_flags |= IFF_RUNNING; 1071 for (i = 0; i < sc->tx_ring_inuse; ++i) { 1072 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1073 1074 ifsq_clr_oactive(txr->ifsq); 1075 ifsq_watchdog_start(&txr->tx_watchdog); 1076 1077 if (!polling) { 1078 callout_reset_bycpu(&txr->tx_gc_timer, 1, 1079 igb_txgc_timer, txr, txr->tx_intr_cpuid); 1080 } 1081 } 1082 1083 igb_set_timer_cpuid(sc, polling); 1084 callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid); 1085 } 1086 1087 static void 1088 igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1089 { 1090 struct igb_softc *sc = ifp->if_softc; 1091 1092 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1093 1094 if ((ifp->if_flags & IFF_RUNNING) == 0) 1095 sc->hw.mac.get_link_status = 1; 1096 igb_update_link_status(sc); 1097 1098 ifmr->ifm_status = IFM_AVALID; 1099 ifmr->ifm_active = IFM_ETHER; 1100 1101 if (!sc->link_active) { 1102 if (sc->hw.mac.autoneg) 1103 ifmr->ifm_active |= IFM_NONE; 1104 else 1105 ifmr->ifm_active |= sc->media.ifm_media; 1106 return; 1107 } 1108 1109 ifmr->ifm_status |= IFM_ACTIVE; 1110 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1111 ifmr->ifm_active |= sc->ifm_flowctrl; 1112 1113 switch (sc->link_speed) { 1114 case 10: 1115 ifmr->ifm_active |= IFM_10_T; 1116 break; 1117 1118 case 100: 1119 /* 1120 * Support for 100Mb SFP - these are Fiber 1121 * but the media type appears as serdes 1122 */ 1123 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1124 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 1125 ifmr->ifm_active |= IFM_100_FX; 1126 else 1127 ifmr->ifm_active |= IFM_100_TX; 1128 break; 1129 1130 case 1000: 1131 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1132 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 1133 ifmr->ifm_active |= IFM_1000_SX; 1134 else 1135 ifmr->ifm_active |= IFM_1000_T; 1136 break; 1137 1138 case 2500: 1139 ifmr->ifm_active |= IFM_2500_SX; 1140 break; 1141 } 1142 1143 if (sc->link_duplex == FULL_DUPLEX) 1144 ifmr->ifm_active |= IFM_FDX; 1145 else 1146 ifmr->ifm_active |= IFM_HDX; 1147 1148 if (sc->link_duplex == FULL_DUPLEX) 1149 ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode); 1150 } 1151 1152 static int 1153 igb_media_change(struct ifnet *ifp) 1154 { 1155 struct igb_softc *sc = ifp->if_softc; 1156 struct ifmedia *ifm = &sc->media; 1157 1158 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1159 1160 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1161 return EINVAL; 1162 1163 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1164 case IFM_AUTO: 1165 sc->hw.mac.autoneg = DO_AUTO_NEG; 1166 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1167 break; 1168 1169 case IFM_1000_SX: 1170 case IFM_1000_T: 1171 sc->hw.mac.autoneg = DO_AUTO_NEG; 1172 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1173 break; 1174 1175 case IFM_100_TX: 1176 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1177 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1178 } else { 1179 if (IFM_OPTIONS(ifm->ifm_media) & 1180 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1181 if (bootverbose) { 1182 if_printf(ifp, "Flow control is not " 1183 "allowed for half-duplex\n"); 1184 } 1185 return EINVAL; 1186 } 1187 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1188 } 1189 sc->hw.mac.autoneg = FALSE; 1190 sc->hw.phy.autoneg_advertised = 0; 1191 break; 1192 1193 case IFM_10_T: 1194 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1195 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1196 } else { 1197 if (IFM_OPTIONS(ifm->ifm_media) & 1198 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1199 if (bootverbose) { 1200 if_printf(ifp, "Flow control is not " 1201 "allowed for half-duplex\n"); 1202 } 1203 return EINVAL; 1204 } 1205 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1206 } 1207 sc->hw.mac.autoneg = FALSE; 1208 sc->hw.phy.autoneg_advertised = 0; 1209 break; 1210 1211 default: 1212 if (bootverbose) { 1213 if_printf(ifp, "Unsupported media type %d\n", 1214 IFM_SUBTYPE(ifm->ifm_media)); 1215 } 1216 return EINVAL; 1217 } 1218 sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK; 1219 1220 if (ifp->if_flags & IFF_RUNNING) 1221 igb_init(sc); 1222 1223 return 0; 1224 } 1225 1226 static void 1227 igb_set_promisc(struct igb_softc *sc) 1228 { 1229 struct ifnet *ifp = &sc->arpcom.ac_if; 1230 struct e1000_hw *hw = &sc->hw; 1231 uint32_t reg; 1232 1233 if (sc->vf_ifp) { 1234 e1000_promisc_set_vf(hw, e1000_promisc_enabled); 1235 return; 1236 } 1237 1238 reg = E1000_READ_REG(hw, E1000_RCTL); 1239 if (ifp->if_flags & IFF_PROMISC) { 1240 reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1241 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1242 } else if (ifp->if_flags & IFF_ALLMULTI) { 1243 reg |= E1000_RCTL_MPE; 1244 reg &= ~E1000_RCTL_UPE; 1245 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1246 } 1247 } 1248 1249 static void 1250 igb_disable_promisc(struct igb_softc *sc) 1251 { 1252 struct e1000_hw *hw = &sc->hw; 1253 struct ifnet *ifp = &sc->arpcom.ac_if; 1254 uint32_t reg; 1255 int mcnt = 0; 1256 1257 if (sc->vf_ifp) { 1258 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 1259 return; 1260 } 1261 reg = E1000_READ_REG(hw, E1000_RCTL); 1262 reg &= ~E1000_RCTL_UPE; 1263 if (ifp->if_flags & IFF_ALLMULTI) { 1264 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 1265 } else { 1266 struct ifmultiaddr *ifma; 1267 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1268 if (ifma->ifma_addr->sa_family != AF_LINK) 1269 continue; 1270 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1271 break; 1272 mcnt++; 1273 } 1274 } 1275 /* Don't disable if in MAX groups */ 1276 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 1277 reg &= ~E1000_RCTL_MPE; 1278 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1279 } 1280 1281 static void 1282 igb_set_multi(struct igb_softc *sc) 1283 { 1284 struct ifnet *ifp = &sc->arpcom.ac_if; 1285 struct ifmultiaddr *ifma; 1286 uint32_t reg_rctl = 0; 1287 uint8_t *mta; 1288 int mcnt = 0; 1289 1290 mta = sc->mta; 1291 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 1292 1293 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1294 if (ifma->ifma_addr->sa_family != AF_LINK) 1295 continue; 1296 1297 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1298 break; 1299 1300 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1301 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); 1302 mcnt++; 1303 } 1304 1305 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 1306 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1307 reg_rctl |= E1000_RCTL_MPE; 1308 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1309 } else { 1310 e1000_update_mc_addr_list(&sc->hw, mta, mcnt); 1311 } 1312 } 1313 1314 static void 1315 igb_timer(void *xsc) 1316 { 1317 struct igb_softc *sc = xsc; 1318 1319 lwkt_serialize_enter(&sc->main_serialize); 1320 1321 igb_update_link_status(sc); 1322 igb_update_stats_counters(sc); 1323 1324 callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid); 1325 1326 lwkt_serialize_exit(&sc->main_serialize); 1327 } 1328 1329 static void 1330 igb_update_link_status(struct igb_softc *sc) 1331 { 1332 struct ifnet *ifp = &sc->arpcom.ac_if; 1333 struct e1000_hw *hw = &sc->hw; 1334 uint32_t link_check, thstat, ctrl; 1335 1336 link_check = thstat = ctrl = 0; 1337 1338 /* Get the cached link value or read for real */ 1339 switch (hw->phy.media_type) { 1340 case e1000_media_type_copper: 1341 if (hw->mac.get_link_status) { 1342 /* Do the work to read phy */ 1343 e1000_check_for_link(hw); 1344 link_check = !hw->mac.get_link_status; 1345 } else { 1346 link_check = TRUE; 1347 } 1348 break; 1349 1350 case e1000_media_type_fiber: 1351 e1000_check_for_link(hw); 1352 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 1353 break; 1354 1355 case e1000_media_type_internal_serdes: 1356 e1000_check_for_link(hw); 1357 link_check = hw->mac.serdes_has_link; 1358 break; 1359 1360 /* VF device is type_unknown */ 1361 case e1000_media_type_unknown: 1362 e1000_check_for_link(hw); 1363 link_check = !hw->mac.get_link_status; 1364 /* Fall thru */ 1365 default: 1366 break; 1367 } 1368 1369 /* Check for thermal downshift or shutdown */ 1370 if (hw->mac.type == e1000_i350) { 1371 thstat = E1000_READ_REG(hw, E1000_THSTAT); 1372 ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT); 1373 } 1374 1375 /* Now we check if a transition has happened */ 1376 if (link_check && sc->link_active == 0) { 1377 e1000_get_speed_and_duplex(hw, 1378 &sc->link_speed, &sc->link_duplex); 1379 if (bootverbose) { 1380 char flowctrl[IFM_ETH_FC_STRLEN]; 1381 1382 /* Get the flow control for display */ 1383 e1000_fc2str(hw->fc.current_mode, flowctrl, 1384 sizeof(flowctrl)); 1385 1386 if_printf(ifp, "Link is up %d Mbps %s, " 1387 "Flow control: %s\n", 1388 sc->link_speed, 1389 sc->link_duplex == FULL_DUPLEX ? 1390 "Full Duplex" : "Half Duplex", 1391 flowctrl); 1392 } 1393 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1394 e1000_force_flowctrl(hw, sc->ifm_flowctrl); 1395 sc->link_active = 1; 1396 1397 ifp->if_baudrate = sc->link_speed * 1000000; 1398 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 1399 (thstat & E1000_THSTAT_LINK_THROTTLE)) 1400 if_printf(ifp, "Link: thermal downshift\n"); 1401 /* Delay Link Up for Phy update */ 1402 if ((hw->mac.type == e1000_i210 || 1403 hw->mac.type == e1000_i211) && 1404 hw->phy.id == I210_I_PHY_ID) 1405 msec_delay(IGB_I210_LINK_DELAY); 1406 /* 1407 * Reset if the media type changed. 1408 * Support AutoMediaDetect for Marvell M88 PHY in i354. 1409 */ 1410 if (hw->dev_spec._82575.media_changed) { 1411 hw->dev_spec._82575.media_changed = FALSE; 1412 igb_reset(sc, TRUE); 1413 } 1414 /* This can sleep */ 1415 ifp->if_link_state = LINK_STATE_UP; 1416 if_link_state_change(ifp); 1417 } else if (!link_check && sc->link_active == 1) { 1418 ifp->if_baudrate = sc->link_speed = 0; 1419 sc->link_duplex = 0; 1420 if (bootverbose) 1421 if_printf(ifp, "Link is Down\n"); 1422 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 1423 (thstat & E1000_THSTAT_PWR_DOWN)) 1424 if_printf(ifp, "Link: thermal shutdown\n"); 1425 sc->link_active = 0; 1426 /* This can sleep */ 1427 ifp->if_link_state = LINK_STATE_DOWN; 1428 if_link_state_change(ifp); 1429 } 1430 } 1431 1432 static void 1433 igb_stop(struct igb_softc *sc) 1434 { 1435 struct ifnet *ifp = &sc->arpcom.ac_if; 1436 int i; 1437 1438 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1439 1440 igb_disable_intr(sc); 1441 1442 callout_stop(&sc->timer); 1443 1444 ifp->if_flags &= ~IFF_RUNNING; 1445 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1446 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1447 1448 ifsq_clr_oactive(txr->ifsq); 1449 ifsq_watchdog_stop(&txr->tx_watchdog); 1450 txr->tx_flags &= ~IGB_TXFLAG_ENABLED; 1451 1452 txr->tx_running = 0; 1453 callout_stop(&txr->tx_gc_timer); 1454 } 1455 1456 e1000_reset_hw(&sc->hw); 1457 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1458 1459 e1000_led_off(&sc->hw); 1460 e1000_cleanup_led(&sc->hw); 1461 1462 for (i = 0; i < sc->tx_ring_cnt; ++i) 1463 igb_free_tx_ring(&sc->tx_rings[i]); 1464 for (i = 0; i < sc->rx_ring_cnt; ++i) 1465 igb_free_rx_ring(&sc->rx_rings[i]); 1466 } 1467 1468 static void 1469 igb_reset(struct igb_softc *sc, boolean_t media_reset) 1470 { 1471 struct ifnet *ifp = &sc->arpcom.ac_if; 1472 struct e1000_hw *hw = &sc->hw; 1473 struct e1000_fc_info *fc = &hw->fc; 1474 uint32_t pba = 0; 1475 uint16_t hwm; 1476 1477 /* Let the firmware know the OS is in control */ 1478 igb_get_hw_control(sc); 1479 1480 /* 1481 * Packet Buffer Allocation (PBA) 1482 * Writing PBA sets the receive portion of the buffer 1483 * the remainder is used for the transmit buffer. 1484 */ 1485 switch (hw->mac.type) { 1486 case e1000_82575: 1487 pba = E1000_PBA_32K; 1488 break; 1489 1490 case e1000_82576: 1491 case e1000_vfadapt: 1492 pba = E1000_READ_REG(hw, E1000_RXPBS); 1493 pba &= E1000_RXPBS_SIZE_MASK_82576; 1494 break; 1495 1496 case e1000_82580: 1497 case e1000_i350: 1498 case e1000_i354: 1499 case e1000_vfadapt_i350: 1500 pba = E1000_READ_REG(hw, E1000_RXPBS); 1501 pba = e1000_rxpbs_adjust_82580(pba); 1502 break; 1503 1504 case e1000_i210: 1505 case e1000_i211: 1506 pba = E1000_PBA_34K; 1507 break; 1508 1509 default: 1510 break; 1511 } 1512 1513 /* Special needs in case of Jumbo frames */ 1514 if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) { 1515 uint32_t tx_space, min_tx, min_rx; 1516 1517 pba = E1000_READ_REG(hw, E1000_PBA); 1518 tx_space = pba >> 16; 1519 pba &= 0xffff; 1520 1521 min_tx = (sc->max_frame_size + 1522 sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2; 1523 min_tx = roundup2(min_tx, 1024); 1524 min_tx >>= 10; 1525 min_rx = sc->max_frame_size; 1526 min_rx = roundup2(min_rx, 1024); 1527 min_rx >>= 10; 1528 if (tx_space < min_tx && (min_tx - tx_space) < pba) { 1529 pba = pba - (min_tx - tx_space); 1530 /* 1531 * if short on rx space, rx wins 1532 * and must trump tx adjustment 1533 */ 1534 if (pba < min_rx) 1535 pba = min_rx; 1536 } 1537 E1000_WRITE_REG(hw, E1000_PBA, pba); 1538 } 1539 1540 /* 1541 * These parameters control the automatic generation (Tx) and 1542 * response (Rx) to Ethernet PAUSE frames. 1543 * - High water mark should allow for at least two frames to be 1544 * received after sending an XOFF. 1545 * - Low water mark works best when it is very near the high water mark. 1546 * This allows the receiver to restart by sending XON when it has 1547 * drained a bit. 1548 */ 1549 hwm = min(((pba << 10) * 9 / 10), 1550 ((pba << 10) - 2 * sc->max_frame_size)); 1551 1552 if (hw->mac.type < e1000_82576) { 1553 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ 1554 fc->low_water = fc->high_water - 8; 1555 } else { 1556 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ 1557 fc->low_water = fc->high_water - 16; 1558 } 1559 fc->pause_time = IGB_FC_PAUSE_TIME; 1560 fc->send_xon = TRUE; 1561 fc->requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl); 1562 1563 /* Issue a global reset */ 1564 e1000_reset_hw(hw); 1565 E1000_WRITE_REG(hw, E1000_WUC, 0); 1566 1567 /* Reset for AutoMediaDetect */ 1568 if (media_reset) { 1569 e1000_setup_init_funcs(hw, TRUE); 1570 e1000_get_bus_info(hw); 1571 } 1572 1573 if (e1000_init_hw(hw) < 0) 1574 if_printf(ifp, "Hardware Initialization Failed\n"); 1575 1576 /* Setup DMA Coalescing */ 1577 igb_init_dmac(sc, pba); 1578 1579 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1580 e1000_get_phy_info(hw); 1581 e1000_check_for_link(hw); 1582 } 1583 1584 static void 1585 igb_setup_ifp(struct igb_softc *sc) 1586 { 1587 struct ifnet *ifp = &sc->arpcom.ac_if; 1588 int i; 1589 1590 ifp->if_softc = sc; 1591 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1592 ifp->if_init = igb_init; 1593 ifp->if_ioctl = igb_ioctl; 1594 ifp->if_start = igb_start; 1595 ifp->if_serialize = igb_serialize; 1596 ifp->if_deserialize = igb_deserialize; 1597 ifp->if_tryserialize = igb_tryserialize; 1598 #ifdef INVARIANTS 1599 ifp->if_serialize_assert = igb_serialize_assert; 1600 #endif 1601 #ifdef IFPOLL_ENABLE 1602 ifp->if_npoll = igb_npoll; 1603 #endif 1604 1605 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_rings[0].num_rx_desc; 1606 1607 ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1); 1608 ifq_set_ready(&ifp->if_snd); 1609 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 1610 1611 ifp->if_mapsubq = ifq_mapsubq_modulo; 1612 ifq_set_subq_divisor(&ifp->if_snd, 1); 1613 1614 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 1615 1616 ifp->if_capabilities = 1617 IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO; 1618 if (IGB_ENABLE_HWRSS(sc)) 1619 ifp->if_capabilities |= IFCAP_RSS; 1620 ifp->if_capenable = ifp->if_capabilities; 1621 ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO; 1622 1623 /* 1624 * Tell the upper layer(s) we support long frames 1625 */ 1626 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1627 1628 /* Setup TX rings and subqueues */ 1629 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1630 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 1631 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1632 1633 ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid); 1634 ifsq_set_priv(ifsq, txr); 1635 ifsq_set_hw_serialize(ifsq, &txr->tx_serialize); 1636 txr->ifsq = ifsq; 1637 1638 ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog); 1639 } 1640 1641 /* 1642 * Specify the media types supported by this adapter and register 1643 * callbacks to update media and link information 1644 */ 1645 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1646 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 1647 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 1648 0, NULL); 1649 } else { 1650 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 1651 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 1652 0, NULL); 1653 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 1654 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 1655 0, NULL); 1656 if (sc->hw.phy.type != e1000_phy_ife) { 1657 ifmedia_add(&sc->media, 1658 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 1659 } 1660 } 1661 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1662 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl); 1663 } 1664 1665 static void 1666 igb_add_sysctl(struct igb_softc *sc) 1667 { 1668 struct sysctl_ctx_list *ctx; 1669 struct sysctl_oid *tree; 1670 char node[32]; 1671 int i; 1672 1673 ctx = device_get_sysctl_ctx(sc->dev); 1674 tree = device_get_sysctl_tree(sc->dev); 1675 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 1676 OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings"); 1677 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 1678 OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0, 1679 "# of RX rings used"); 1680 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 1681 OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings"); 1682 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 1683 OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0, 1684 "# of TX rings used"); 1685 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 1686 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0, 1687 "# of RX descs"); 1688 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 1689 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0, 1690 "# of TX descs"); 1691 1692 #define IGB_ADD_INTR_RATE_SYSCTL(sc, use, name) \ 1693 do { \ 1694 igb_add_intr_rate_sysctl(sc, IGB_INTR_USE_##use, #name "_intr_rate", \ 1695 #use " interrupt rate"); \ 1696 } while (0) 1697 1698 IGB_ADD_INTR_RATE_SYSCTL(sc, RXTX, rxtx); 1699 IGB_ADD_INTR_RATE_SYSCTL(sc, RX, rx); 1700 IGB_ADD_INTR_RATE_SYSCTL(sc, TX, tx); 1701 IGB_ADD_INTR_RATE_SYSCTL(sc, STATUS, sts); 1702 1703 #undef IGB_ADD_INTR_RATE_SYSCTL 1704 1705 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1706 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1707 sc, 0, igb_sysctl_tx_intr_nsegs, "I", 1708 "# of segments per TX interrupt"); 1709 1710 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1711 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1712 sc, 0, igb_sysctl_tx_wreg_nsegs, "I", 1713 "# of segments sent before write to hardware register"); 1714 1715 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1716 OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1717 sc, 0, igb_sysctl_rx_wreg_nsegs, "I", 1718 "# of segments received before write to hardware register"); 1719 1720 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 1721 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1722 OID_AUTO, "tx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1723 sc->tx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I", 1724 "TX MSI-X CPU map"); 1725 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1726 OID_AUTO, "rx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1727 sc->rx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I", 1728 "RX MSI-X CPU map"); 1729 } 1730 #ifdef IFPOLL_ENABLE 1731 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1732 OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1733 sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 1734 "TX polling CPU map"); 1735 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1736 OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1737 sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 1738 "RX polling CPU map"); 1739 #endif 1740 1741 #ifdef IGB_RSS_DEBUG 1742 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 1743 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0, 1744 "RSS debug level"); 1745 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1746 ksnprintf(node, sizeof(node), "rx%d_pkt", i); 1747 SYSCTL_ADD_ULONG(ctx, 1748 SYSCTL_CHILDREN(tree), OID_AUTO, node, 1749 CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets"); 1750 } 1751 #endif 1752 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1753 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1754 1755 #ifdef IGB_TSS_DEBUG 1756 ksnprintf(node, sizeof(node), "tx%d_pkt", i); 1757 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node, 1758 CTLFLAG_RW, &txr->tx_packets, "TXed packets"); 1759 #endif 1760 ksnprintf(node, sizeof(node), "tx%d_nmbuf", i); 1761 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node, 1762 CTLFLAG_RD, &txr->tx_nmbuf, 0, "# of pending TX mbufs"); 1763 1764 ksnprintf(node, sizeof(node), "tx%d_gc", i); 1765 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node, 1766 CTLFLAG_RW, &txr->tx_gc, "# of TX desc GC"); 1767 } 1768 } 1769 1770 static int 1771 igb_alloc_rings(struct igb_softc *sc) 1772 { 1773 int error, i; 1774 1775 /* 1776 * Create top level busdma tag 1777 */ 1778 error = bus_dma_tag_create(NULL, 1, 0, 1779 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1780 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 1781 &sc->parent_tag); 1782 if (error) { 1783 device_printf(sc->dev, "could not create top level DMA tag\n"); 1784 return error; 1785 } 1786 1787 /* 1788 * Allocate TX descriptor rings and buffers 1789 */ 1790 sc->tx_rings = kmalloc_cachealign( 1791 sizeof(struct igb_tx_ring) * sc->tx_ring_cnt, 1792 M_DEVBUF, M_WAITOK | M_ZERO); 1793 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1794 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1795 1796 /* Set up some basics */ 1797 txr->sc = sc; 1798 txr->me = i; 1799 txr->tx_intr_cpuid = -1; 1800 lwkt_serialize_init(&txr->tx_serialize); 1801 callout_init_mp(&txr->tx_gc_timer); 1802 1803 error = igb_create_tx_ring(txr); 1804 if (error) 1805 return error; 1806 } 1807 1808 /* 1809 * Allocate RX descriptor rings and buffers 1810 */ 1811 sc->rx_rings = kmalloc_cachealign( 1812 sizeof(struct igb_rx_ring) * sc->rx_ring_cnt, 1813 M_DEVBUF, M_WAITOK | M_ZERO); 1814 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1815 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 1816 1817 /* Set up some basics */ 1818 rxr->sc = sc; 1819 rxr->me = i; 1820 lwkt_serialize_init(&rxr->rx_serialize); 1821 1822 error = igb_create_rx_ring(rxr); 1823 if (error) 1824 return error; 1825 } 1826 1827 return 0; 1828 } 1829 1830 static void 1831 igb_free_rings(struct igb_softc *sc) 1832 { 1833 int i; 1834 1835 if (sc->tx_rings != NULL) { 1836 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1837 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1838 1839 igb_destroy_tx_ring(txr, txr->num_tx_desc); 1840 } 1841 kfree(sc->tx_rings, M_DEVBUF); 1842 } 1843 1844 if (sc->rx_rings != NULL) { 1845 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1846 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 1847 1848 igb_destroy_rx_ring(rxr, rxr->num_rx_desc); 1849 } 1850 kfree(sc->rx_rings, M_DEVBUF); 1851 } 1852 } 1853 1854 static int 1855 igb_create_tx_ring(struct igb_tx_ring *txr) 1856 { 1857 int tsize, error, i, ntxd; 1858 1859 /* 1860 * Validate number of transmit descriptors. It must not exceed 1861 * hardware maximum, and must be multiple of IGB_DBA_ALIGN. 1862 */ 1863 ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd); 1864 if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 || 1865 ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) { 1866 device_printf(txr->sc->dev, 1867 "Using %d TX descriptors instead of %d!\n", 1868 IGB_DEFAULT_TXD, ntxd); 1869 txr->num_tx_desc = IGB_DEFAULT_TXD; 1870 } else { 1871 txr->num_tx_desc = ntxd; 1872 } 1873 1874 /* 1875 * Allocate TX descriptor ring 1876 */ 1877 tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc), 1878 IGB_DBA_ALIGN); 1879 txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag, 1880 IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 1881 &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr); 1882 if (txr->txdma.dma_vaddr == NULL) { 1883 device_printf(txr->sc->dev, 1884 "Unable to allocate TX Descriptor memory\n"); 1885 return ENOMEM; 1886 } 1887 txr->tx_base = txr->txdma.dma_vaddr; 1888 bzero(txr->tx_base, tsize); 1889 1890 tsize = __VM_CACHELINE_ALIGN( 1891 sizeof(struct igb_tx_buf) * txr->num_tx_desc); 1892 txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO); 1893 1894 /* 1895 * Allocate TX head write-back buffer 1896 */ 1897 txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag, 1898 __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK, 1899 &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr); 1900 if (txr->tx_hdr == NULL) { 1901 device_printf(txr->sc->dev, 1902 "Unable to allocate TX head write-back buffer\n"); 1903 return ENOMEM; 1904 } 1905 1906 /* 1907 * Create DMA tag for TX buffers 1908 */ 1909 error = bus_dma_tag_create(txr->sc->parent_tag, 1910 1, 0, /* alignment, bounds */ 1911 BUS_SPACE_MAXADDR, /* lowaddr */ 1912 BUS_SPACE_MAXADDR, /* highaddr */ 1913 NULL, NULL, /* filter, filterarg */ 1914 IGB_TSO_SIZE, /* maxsize */ 1915 IGB_MAX_SCATTER, /* nsegments */ 1916 PAGE_SIZE, /* maxsegsize */ 1917 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 1918 BUS_DMA_ONEBPAGE, /* flags */ 1919 &txr->tx_tag); 1920 if (error) { 1921 device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n"); 1922 kfree(txr->tx_buf, M_DEVBUF); 1923 txr->tx_buf = NULL; 1924 return error; 1925 } 1926 1927 /* 1928 * Create DMA maps for TX buffers 1929 */ 1930 for (i = 0; i < txr->num_tx_desc; ++i) { 1931 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1932 1933 error = bus_dmamap_create(txr->tx_tag, 1934 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map); 1935 if (error) { 1936 device_printf(txr->sc->dev, 1937 "Unable to create TX DMA map\n"); 1938 igb_destroy_tx_ring(txr, i); 1939 return error; 1940 } 1941 } 1942 1943 if (txr->sc->hw.mac.type == e1000_82575) 1944 txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0; 1945 1946 /* 1947 * Initialize various watermark 1948 */ 1949 if (txr->sc->hw.mac.type == e1000_82575) { 1950 /* 1951 * There no ways to GC pending TX mbufs in 'header 1952 * write back' mode with reduced # of RS TX descs, 1953 * since TDH does _not_ move for 82575. 1954 */ 1955 txr->intr_nsegs = 1; 1956 } else { 1957 txr->intr_nsegs = txr->num_tx_desc / 16; 1958 } 1959 txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS; 1960 1961 return 0; 1962 } 1963 1964 static void 1965 igb_free_tx_ring(struct igb_tx_ring *txr) 1966 { 1967 int i; 1968 1969 for (i = 0; i < txr->num_tx_desc; ++i) { 1970 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1971 1972 if (txbuf->m_head != NULL) 1973 igb_free_txbuf(txr, txbuf); 1974 } 1975 } 1976 1977 static void 1978 igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc) 1979 { 1980 int i; 1981 1982 if (txr->txdma.dma_vaddr != NULL) { 1983 bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map); 1984 bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr, 1985 txr->txdma.dma_map); 1986 bus_dma_tag_destroy(txr->txdma.dma_tag); 1987 txr->txdma.dma_vaddr = NULL; 1988 } 1989 1990 if (txr->tx_hdr != NULL) { 1991 bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap); 1992 bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr, 1993 txr->tx_hdr_dmap); 1994 bus_dma_tag_destroy(txr->tx_hdr_dtag); 1995 txr->tx_hdr = NULL; 1996 } 1997 1998 if (txr->tx_buf == NULL) 1999 return; 2000 2001 for (i = 0; i < ndesc; ++i) { 2002 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 2003 2004 KKASSERT(txbuf->m_head == NULL); 2005 bus_dmamap_destroy(txr->tx_tag, txbuf->map); 2006 } 2007 bus_dma_tag_destroy(txr->tx_tag); 2008 2009 kfree(txr->tx_buf, M_DEVBUF); 2010 txr->tx_buf = NULL; 2011 } 2012 2013 static void 2014 igb_init_tx_ring(struct igb_tx_ring *txr) 2015 { 2016 /* Clear the old descriptor contents */ 2017 bzero(txr->tx_base, 2018 sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc); 2019 2020 /* Clear TX head write-back buffer */ 2021 *(txr->tx_hdr) = 0; 2022 2023 /* Reset indices */ 2024 txr->next_avail_desc = 0; 2025 txr->next_to_clean = 0; 2026 txr->tx_nsegs = 0; 2027 txr->tx_running = 0; 2028 txr->tx_nmbuf = 0; 2029 2030 /* Set number of descriptors available */ 2031 txr->tx_avail = txr->num_tx_desc; 2032 2033 /* Enable this TX ring */ 2034 txr->tx_flags |= IGB_TXFLAG_ENABLED; 2035 } 2036 2037 static void 2038 igb_init_tx_unit(struct igb_softc *sc) 2039 { 2040 struct e1000_hw *hw = &sc->hw; 2041 uint32_t tctl; 2042 int i; 2043 2044 /* Setup the Tx Descriptor Rings */ 2045 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2046 struct igb_tx_ring *txr = &sc->tx_rings[i]; 2047 uint64_t bus_addr = txr->txdma.dma_paddr; 2048 uint64_t hdr_paddr = txr->tx_hdr_paddr; 2049 uint32_t txdctl = 0; 2050 uint32_t dca_txctrl; 2051 2052 E1000_WRITE_REG(hw, E1000_TDLEN(i), 2053 txr->num_tx_desc * sizeof(struct e1000_tx_desc)); 2054 E1000_WRITE_REG(hw, E1000_TDBAH(i), 2055 (uint32_t)(bus_addr >> 32)); 2056 E1000_WRITE_REG(hw, E1000_TDBAL(i), 2057 (uint32_t)bus_addr); 2058 2059 /* Setup the HW Tx Head and Tail descriptor pointers */ 2060 E1000_WRITE_REG(hw, E1000_TDT(i), 0); 2061 E1000_WRITE_REG(hw, E1000_TDH(i), 0); 2062 2063 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); 2064 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 2065 E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl); 2066 2067 /* 2068 * Don't set WB_on_EITR: 2069 * - 82575 does not have it 2070 * - It almost has no effect on 82576, see: 2071 * 82576 specification update errata #26 2072 * - It causes unnecessary bus traffic 2073 */ 2074 E1000_WRITE_REG(hw, E1000_TDWBAH(i), 2075 (uint32_t)(hdr_paddr >> 32)); 2076 E1000_WRITE_REG(hw, E1000_TDWBAL(i), 2077 ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE); 2078 2079 /* 2080 * WTHRESH is ignored by the hardware, since header 2081 * write back mode is used. 2082 */ 2083 txdctl |= IGB_TX_PTHRESH; 2084 txdctl |= IGB_TX_HTHRESH << 8; 2085 txdctl |= IGB_TX_WTHRESH << 16; 2086 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 2087 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); 2088 } 2089 2090 if (sc->vf_ifp) 2091 return; 2092 2093 e1000_config_collision_dist(hw); 2094 2095 /* Program the Transmit Control Register */ 2096 tctl = E1000_READ_REG(hw, E1000_TCTL); 2097 tctl &= ~E1000_TCTL_CT; 2098 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2099 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); 2100 2101 /* This write will effectively turn on the transmit unit. */ 2102 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 2103 } 2104 2105 static boolean_t 2106 igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp) 2107 { 2108 struct e1000_adv_tx_context_desc *TXD; 2109 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 2110 int ehdrlen, ctxd, ip_hlen = 0; 2111 boolean_t offload = TRUE; 2112 2113 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0) 2114 offload = FALSE; 2115 2116 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 2117 2118 ctxd = txr->next_avail_desc; 2119 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 2120 2121 /* 2122 * In advanced descriptors the vlan tag must 2123 * be placed into the context descriptor, thus 2124 * we need to be here just for that setup. 2125 */ 2126 if (mp->m_flags & M_VLANTAG) { 2127 uint16_t vlantag; 2128 2129 vlantag = htole16(mp->m_pkthdr.ether_vlantag); 2130 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 2131 } else if (!offload) { 2132 return FALSE; 2133 } 2134 2135 ehdrlen = mp->m_pkthdr.csum_lhlen; 2136 KASSERT(ehdrlen > 0, ("invalid ether hlen")); 2137 2138 /* Set the ether header length */ 2139 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; 2140 if (mp->m_pkthdr.csum_flags & CSUM_IP) { 2141 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 2142 ip_hlen = mp->m_pkthdr.csum_iphlen; 2143 KASSERT(ip_hlen > 0, ("invalid ip hlen")); 2144 } 2145 vlan_macip_lens |= ip_hlen; 2146 2147 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 2148 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 2149 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 2150 else if (mp->m_pkthdr.csum_flags & CSUM_UDP) 2151 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; 2152 2153 /* 2154 * 82575 needs the TX context index added; the queue 2155 * index is used as TX context index here. 2156 */ 2157 if (txr->sc->hw.mac.type == e1000_82575) 2158 mss_l4len_idx = txr->me << 4; 2159 2160 /* Now copy bits into descriptor */ 2161 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 2162 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 2163 TXD->seqnum_seed = htole32(0); 2164 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 2165 2166 /* We've consumed the first desc, adjust counters */ 2167 if (++ctxd == txr->num_tx_desc) 2168 ctxd = 0; 2169 txr->next_avail_desc = ctxd; 2170 --txr->tx_avail; 2171 2172 return offload; 2173 } 2174 2175 static void 2176 igb_txeof(struct igb_tx_ring *txr, int hdr) 2177 { 2178 int first, avail; 2179 2180 if (txr->tx_avail == txr->num_tx_desc) 2181 return; 2182 2183 first = txr->next_to_clean; 2184 if (first == hdr) 2185 return; 2186 2187 avail = txr->tx_avail; 2188 while (first != hdr) { 2189 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; 2190 2191 KKASSERT(avail < txr->num_tx_desc); 2192 ++avail; 2193 2194 if (txbuf->m_head) 2195 igb_free_txbuf(txr, txbuf); 2196 2197 if (++first == txr->num_tx_desc) 2198 first = 0; 2199 } 2200 txr->next_to_clean = first; 2201 txr->tx_avail = avail; 2202 2203 /* 2204 * If we have a minimum free, clear OACTIVE 2205 * to tell the stack that it is OK to send packets. 2206 */ 2207 if (txr->tx_avail > IGB_MAX_SCATTER + IGB_TX_RESERVED) { 2208 ifsq_clr_oactive(txr->ifsq); 2209 2210 /* 2211 * We have enough TX descriptors, turn off 2212 * the watchdog. We allow small amount of 2213 * packets (roughly intr_nsegs) pending on 2214 * the transmit ring. 2215 */ 2216 txr->tx_watchdog.wd_timer = 0; 2217 } 2218 txr->tx_running = IGB_TX_RUNNING; 2219 } 2220 2221 static void 2222 igb_txgc(struct igb_tx_ring *txr) 2223 { 2224 int first, hdr; 2225 #ifdef INVARIANTS 2226 int avail; 2227 #endif 2228 2229 if (txr->tx_avail == txr->num_tx_desc) 2230 return; 2231 2232 hdr = E1000_READ_REG(&txr->sc->hw, E1000_TDH(txr->me)), 2233 first = txr->next_to_clean; 2234 if (first == hdr) 2235 goto done; 2236 txr->tx_gc++; 2237 2238 #ifdef INVARIANTS 2239 avail = txr->tx_avail; 2240 #endif 2241 while (first != hdr) { 2242 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; 2243 2244 #ifdef INVARIANTS 2245 KKASSERT(avail < txr->num_tx_desc); 2246 ++avail; 2247 #endif 2248 if (txbuf->m_head) 2249 igb_free_txbuf(txr, txbuf); 2250 2251 if (++first == txr->num_tx_desc) 2252 first = 0; 2253 } 2254 done: 2255 if (txr->tx_nmbuf) 2256 txr->tx_running = IGB_TX_RUNNING; 2257 } 2258 2259 static int 2260 igb_create_rx_ring(struct igb_rx_ring *rxr) 2261 { 2262 int rsize, i, error, nrxd; 2263 2264 /* 2265 * Validate number of receive descriptors. It must not exceed 2266 * hardware maximum, and must be multiple of IGB_DBA_ALIGN. 2267 */ 2268 nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd); 2269 if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 || 2270 nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) { 2271 device_printf(rxr->sc->dev, 2272 "Using %d RX descriptors instead of %d!\n", 2273 IGB_DEFAULT_RXD, nrxd); 2274 rxr->num_rx_desc = IGB_DEFAULT_RXD; 2275 } else { 2276 rxr->num_rx_desc = nrxd; 2277 } 2278 2279 /* 2280 * Allocate RX descriptor ring 2281 */ 2282 rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc), 2283 IGB_DBA_ALIGN); 2284 rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag, 2285 IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2286 &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map, 2287 &rxr->rxdma.dma_paddr); 2288 if (rxr->rxdma.dma_vaddr == NULL) { 2289 device_printf(rxr->sc->dev, 2290 "Unable to allocate RxDescriptor memory\n"); 2291 return ENOMEM; 2292 } 2293 rxr->rx_base = rxr->rxdma.dma_vaddr; 2294 bzero(rxr->rx_base, rsize); 2295 2296 rsize = __VM_CACHELINE_ALIGN( 2297 sizeof(struct igb_rx_buf) * rxr->num_rx_desc); 2298 rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2299 2300 /* 2301 * Create DMA tag for RX buffers 2302 */ 2303 error = bus_dma_tag_create(rxr->sc->parent_tag, 2304 1, 0, /* alignment, bounds */ 2305 BUS_SPACE_MAXADDR, /* lowaddr */ 2306 BUS_SPACE_MAXADDR, /* highaddr */ 2307 NULL, NULL, /* filter, filterarg */ 2308 MCLBYTES, /* maxsize */ 2309 1, /* nsegments */ 2310 MCLBYTES, /* maxsegsize */ 2311 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2312 &rxr->rx_tag); 2313 if (error) { 2314 device_printf(rxr->sc->dev, 2315 "Unable to create RX payload DMA tag\n"); 2316 kfree(rxr->rx_buf, M_DEVBUF); 2317 rxr->rx_buf = NULL; 2318 return error; 2319 } 2320 2321 /* 2322 * Create spare DMA map for RX buffers 2323 */ 2324 error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK, 2325 &rxr->rx_sparemap); 2326 if (error) { 2327 device_printf(rxr->sc->dev, 2328 "Unable to create spare RX DMA maps\n"); 2329 bus_dma_tag_destroy(rxr->rx_tag); 2330 kfree(rxr->rx_buf, M_DEVBUF); 2331 rxr->rx_buf = NULL; 2332 return error; 2333 } 2334 2335 /* 2336 * Create DMA maps for RX buffers 2337 */ 2338 for (i = 0; i < rxr->num_rx_desc; i++) { 2339 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2340 2341 error = bus_dmamap_create(rxr->rx_tag, 2342 BUS_DMA_WAITOK, &rxbuf->map); 2343 if (error) { 2344 device_printf(rxr->sc->dev, 2345 "Unable to create RX DMA maps\n"); 2346 igb_destroy_rx_ring(rxr, i); 2347 return error; 2348 } 2349 } 2350 2351 /* 2352 * Initialize various watermark 2353 */ 2354 rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS; 2355 2356 return 0; 2357 } 2358 2359 static void 2360 igb_free_rx_ring(struct igb_rx_ring *rxr) 2361 { 2362 int i; 2363 2364 for (i = 0; i < rxr->num_rx_desc; ++i) { 2365 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2366 2367 if (rxbuf->m_head != NULL) { 2368 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2369 m_freem(rxbuf->m_head); 2370 rxbuf->m_head = NULL; 2371 } 2372 } 2373 2374 if (rxr->fmp != NULL) 2375 m_freem(rxr->fmp); 2376 rxr->fmp = NULL; 2377 rxr->lmp = NULL; 2378 } 2379 2380 static void 2381 igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc) 2382 { 2383 int i; 2384 2385 if (rxr->rxdma.dma_vaddr != NULL) { 2386 bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map); 2387 bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr, 2388 rxr->rxdma.dma_map); 2389 bus_dma_tag_destroy(rxr->rxdma.dma_tag); 2390 rxr->rxdma.dma_vaddr = NULL; 2391 } 2392 2393 if (rxr->rx_buf == NULL) 2394 return; 2395 2396 for (i = 0; i < ndesc; ++i) { 2397 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2398 2399 KKASSERT(rxbuf->m_head == NULL); 2400 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map); 2401 } 2402 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap); 2403 bus_dma_tag_destroy(rxr->rx_tag); 2404 2405 kfree(rxr->rx_buf, M_DEVBUF); 2406 rxr->rx_buf = NULL; 2407 } 2408 2409 static void 2410 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf) 2411 { 2412 rxd->read.pkt_addr = htole64(rxbuf->paddr); 2413 rxd->wb.upper.status_error = 0; 2414 } 2415 2416 static int 2417 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait) 2418 { 2419 struct mbuf *m; 2420 bus_dma_segment_t seg; 2421 bus_dmamap_t map; 2422 struct igb_rx_buf *rxbuf; 2423 int error, nseg; 2424 2425 m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2426 if (m == NULL) { 2427 if (wait) { 2428 if_printf(&rxr->sc->arpcom.ac_if, 2429 "Unable to allocate RX mbuf\n"); 2430 } 2431 return ENOBUFS; 2432 } 2433 m->m_len = m->m_pkthdr.len = MCLBYTES; 2434 2435 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2436 m_adj(m, ETHER_ALIGN); 2437 2438 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2439 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2440 if (error) { 2441 m_freem(m); 2442 if (wait) { 2443 if_printf(&rxr->sc->arpcom.ac_if, 2444 "Unable to load RX mbuf\n"); 2445 } 2446 return error; 2447 } 2448 2449 rxbuf = &rxr->rx_buf[i]; 2450 if (rxbuf->m_head != NULL) 2451 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2452 2453 map = rxbuf->map; 2454 rxbuf->map = rxr->rx_sparemap; 2455 rxr->rx_sparemap = map; 2456 2457 rxbuf->m_head = m; 2458 rxbuf->paddr = seg.ds_addr; 2459 2460 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2461 return 0; 2462 } 2463 2464 static int 2465 igb_init_rx_ring(struct igb_rx_ring *rxr) 2466 { 2467 int i; 2468 2469 /* Clear the ring contents */ 2470 bzero(rxr->rx_base, 2471 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2472 2473 /* Now replenish the ring mbufs */ 2474 for (i = 0; i < rxr->num_rx_desc; ++i) { 2475 int error; 2476 2477 error = igb_newbuf(rxr, i, TRUE); 2478 if (error) 2479 return error; 2480 } 2481 2482 /* Setup our descriptor indices */ 2483 rxr->next_to_check = 0; 2484 2485 rxr->fmp = NULL; 2486 rxr->lmp = NULL; 2487 rxr->discard = FALSE; 2488 2489 return 0; 2490 } 2491 2492 static void 2493 igb_init_rx_unit(struct igb_softc *sc, boolean_t polling) 2494 { 2495 struct ifnet *ifp = &sc->arpcom.ac_if; 2496 struct e1000_hw *hw = &sc->hw; 2497 uint32_t rctl, rxcsum, srrctl = 0; 2498 int i; 2499 2500 /* 2501 * Make sure receives are disabled while setting 2502 * up the descriptor ring 2503 */ 2504 rctl = E1000_READ_REG(hw, E1000_RCTL); 2505 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2506 2507 #if 0 2508 /* 2509 ** Set up for header split 2510 */ 2511 if (igb_header_split) { 2512 /* Use a standard mbuf for the header */ 2513 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2514 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2515 } else 2516 #endif 2517 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2518 2519 /* 2520 ** Set up for jumbo frames 2521 */ 2522 if (ifp->if_mtu > ETHERMTU) { 2523 rctl |= E1000_RCTL_LPE; 2524 #if 0 2525 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { 2526 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2527 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; 2528 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { 2529 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2530 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; 2531 } 2532 /* Set maximum packet len */ 2533 psize = adapter->max_frame_size; 2534 /* are we on a vlan? */ 2535 if (adapter->ifp->if_vlantrunk != NULL) 2536 psize += VLAN_TAG_SIZE; 2537 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); 2538 #else 2539 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2540 rctl |= E1000_RCTL_SZ_2048; 2541 #endif 2542 } else { 2543 rctl &= ~E1000_RCTL_LPE; 2544 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2545 rctl |= E1000_RCTL_SZ_2048; 2546 } 2547 2548 /* 2549 * If TX flow control is disabled and more the 1 RX rings 2550 * are enabled, enable DROP. 2551 * 2552 * This drops frames rather than hanging the RX MAC for all 2553 * RX rings. 2554 */ 2555 if (sc->rx_ring_inuse > 1 && 2556 (sc->ifm_flowctrl & IFM_ETH_TXPAUSE) == 0) { 2557 srrctl |= E1000_SRRCTL_DROP_EN; 2558 if (bootverbose) 2559 if_printf(ifp, "enable RX drop\n"); 2560 } 2561 2562 /* Setup the Base and Length of the Rx Descriptor Rings */ 2563 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2564 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2565 uint64_t bus_addr = rxr->rxdma.dma_paddr; 2566 uint32_t rxdctl; 2567 2568 E1000_WRITE_REG(hw, E1000_RDLEN(i), 2569 rxr->num_rx_desc * sizeof(struct e1000_rx_desc)); 2570 E1000_WRITE_REG(hw, E1000_RDBAH(i), 2571 (uint32_t)(bus_addr >> 32)); 2572 E1000_WRITE_REG(hw, E1000_RDBAL(i), 2573 (uint32_t)bus_addr); 2574 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); 2575 /* Enable this Queue */ 2576 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); 2577 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 2578 rxdctl &= 0xFFF00000; 2579 rxdctl |= IGB_RX_PTHRESH; 2580 rxdctl |= IGB_RX_HTHRESH << 8; 2581 /* 2582 * Don't set WTHRESH to a value above 1 on 82576, see: 2583 * 82576 specification update errata #26 2584 */ 2585 rxdctl |= IGB_RX_WTHRESH << 16; 2586 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); 2587 } 2588 2589 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2590 rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE); 2591 2592 /* 2593 * Receive Checksum Offload for TCP and UDP 2594 * 2595 * Checksum offloading is also enabled if multiple receive 2596 * queue is to be supported, since we need it to figure out 2597 * fragments. 2598 */ 2599 if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) { 2600 /* 2601 * NOTE: 2602 * PCSD must be enabled to enable multiple 2603 * receive queues. 2604 */ 2605 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2606 E1000_RXCSUM_PCSD; 2607 } else { 2608 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2609 E1000_RXCSUM_PCSD); 2610 } 2611 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2612 2613 if (sc->rx_ring_inuse > 1) { 2614 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE]; 2615 const struct if_ringmap *rm; 2616 uint32_t reta_shift; 2617 int j, r; 2618 2619 /* 2620 * NOTE: 2621 * When we reach here, RSS has already been disabled 2622 * in igb_stop(), so we could safely configure RSS key 2623 * and redirect table. 2624 */ 2625 2626 /* 2627 * Configure RSS key 2628 */ 2629 toeplitz_get_key(key, sizeof(key)); 2630 for (i = 0; i < IGB_NRSSRK; ++i) { 2631 uint32_t rssrk; 2632 2633 rssrk = IGB_RSSRK_VAL(key, i); 2634 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2635 2636 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk); 2637 } 2638 2639 /* 2640 * Configure RSS redirect table 2641 */ 2642 if (polling) 2643 rm = sc->rx_rmap; 2644 else 2645 rm = sc->rx_rmap_intr; 2646 if_ringmap_rdrtable(rm, sc->rdr_table, IGB_RDRTABLE_SIZE); 2647 2648 reta_shift = IGB_RETA_SHIFT; 2649 if (hw->mac.type == e1000_82575) 2650 reta_shift = IGB_RETA_SHIFT_82575; 2651 2652 r = 0; 2653 for (j = 0; j < IGB_NRETA; ++j) { 2654 uint32_t reta = 0; 2655 2656 for (i = 0; i < IGB_RETA_SIZE; ++i) { 2657 uint32_t q; 2658 2659 q = sc->rdr_table[r] << reta_shift; 2660 reta |= q << (8 * i); 2661 ++r; 2662 } 2663 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2664 E1000_WRITE_REG(hw, E1000_RETA(j), reta); 2665 } 2666 2667 /* 2668 * Enable multiple receive queues. 2669 * Enable IPv4 RSS standard hash functions. 2670 * Disable RSS interrupt on 82575 2671 */ 2672 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2673 E1000_MRQC_ENABLE_RSS_4Q | 2674 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2675 E1000_MRQC_RSS_FIELD_IPV4); 2676 } 2677 2678 /* Setup the Receive Control Register */ 2679 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2680 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2681 E1000_RCTL_RDMTS_HALF | 2682 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2683 /* Strip CRC bytes. */ 2684 rctl |= E1000_RCTL_SECRC; 2685 /* Make sure VLAN Filters are off */ 2686 rctl &= ~E1000_RCTL_VFE; 2687 /* Don't store bad packets */ 2688 rctl &= ~E1000_RCTL_SBP; 2689 2690 /* Enable Receives */ 2691 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2692 2693 /* 2694 * Setup the HW Rx Head and Tail Descriptor Pointers 2695 * - needs to be after enable 2696 */ 2697 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2698 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2699 2700 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2701 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1); 2702 } 2703 } 2704 2705 static void 2706 igb_rx_refresh(struct igb_rx_ring *rxr, int i) 2707 { 2708 if (--i < 0) 2709 i = rxr->num_rx_desc - 1; 2710 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2711 } 2712 2713 static void 2714 igb_rxeof(struct igb_rx_ring *rxr, int count) 2715 { 2716 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2717 union e1000_adv_rx_desc *cur; 2718 uint32_t staterr; 2719 int i, ncoll = 0, cpuid = mycpuid; 2720 2721 i = rxr->next_to_check; 2722 cur = &rxr->rx_base[i]; 2723 staterr = le32toh(cur->wb.upper.status_error); 2724 2725 if ((staterr & E1000_RXD_STAT_DD) == 0) 2726 return; 2727 2728 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2729 struct pktinfo *pi = NULL, pi0; 2730 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2731 struct mbuf *m = NULL; 2732 boolean_t eop; 2733 2734 eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE; 2735 if (eop) 2736 --count; 2737 2738 ++ncoll; 2739 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2740 !rxr->discard) { 2741 struct mbuf *mp = rxbuf->m_head; 2742 uint32_t hash, hashtype; 2743 uint16_t vlan; 2744 int len; 2745 2746 len = le16toh(cur->wb.upper.length); 2747 if ((rxr->sc->hw.mac.type == e1000_i350 || 2748 rxr->sc->hw.mac.type == e1000_i354) && 2749 (staterr & E1000_RXDEXT_STATERR_LB)) 2750 vlan = be16toh(cur->wb.upper.vlan); 2751 else 2752 vlan = le16toh(cur->wb.upper.vlan); 2753 2754 hash = le32toh(cur->wb.lower.hi_dword.rss); 2755 hashtype = le32toh(cur->wb.lower.lo_dword.data) & 2756 E1000_RXDADV_RSSTYPE_MASK; 2757 2758 IGB_RSS_DPRINTF(rxr->sc, 10, 2759 "ring%d, hash 0x%08x, hashtype %u\n", 2760 rxr->me, hash, hashtype); 2761 2762 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2763 BUS_DMASYNC_POSTREAD); 2764 2765 if (igb_newbuf(rxr, i, FALSE) != 0) { 2766 IFNET_STAT_INC(ifp, iqdrops, 1); 2767 goto discard; 2768 } 2769 2770 mp->m_len = len; 2771 if (rxr->fmp == NULL) { 2772 mp->m_pkthdr.len = len; 2773 rxr->fmp = mp; 2774 rxr->lmp = mp; 2775 } else { 2776 rxr->lmp->m_next = mp; 2777 rxr->lmp = rxr->lmp->m_next; 2778 rxr->fmp->m_pkthdr.len += len; 2779 } 2780 2781 if (eop) { 2782 m = rxr->fmp; 2783 rxr->fmp = NULL; 2784 rxr->lmp = NULL; 2785 2786 m->m_pkthdr.rcvif = ifp; 2787 IFNET_STAT_INC(ifp, ipackets, 1); 2788 2789 if (ifp->if_capenable & IFCAP_RXCSUM) 2790 igb_rxcsum(staterr, m); 2791 2792 if (staterr & E1000_RXD_STAT_VP) { 2793 m->m_pkthdr.ether_vlantag = vlan; 2794 m->m_flags |= M_VLANTAG; 2795 } 2796 2797 if (ifp->if_capenable & IFCAP_RSS) { 2798 pi = igb_rssinfo(m, &pi0, 2799 hash, hashtype, staterr); 2800 } 2801 #ifdef IGB_RSS_DEBUG 2802 rxr->rx_packets++; 2803 #endif 2804 } 2805 } else { 2806 IFNET_STAT_INC(ifp, ierrors, 1); 2807 discard: 2808 igb_setup_rxdesc(cur, rxbuf); 2809 if (!eop) 2810 rxr->discard = TRUE; 2811 else 2812 rxr->discard = FALSE; 2813 if (rxr->fmp != NULL) { 2814 m_freem(rxr->fmp); 2815 rxr->fmp = NULL; 2816 rxr->lmp = NULL; 2817 } 2818 m = NULL; 2819 } 2820 2821 if (m != NULL) 2822 ifp->if_input(ifp, m, pi, cpuid); 2823 2824 /* Advance our pointers to the next descriptor. */ 2825 if (++i == rxr->num_rx_desc) 2826 i = 0; 2827 2828 if (ncoll >= rxr->wreg_nsegs) { 2829 igb_rx_refresh(rxr, i); 2830 ncoll = 0; 2831 } 2832 2833 cur = &rxr->rx_base[i]; 2834 staterr = le32toh(cur->wb.upper.status_error); 2835 } 2836 rxr->next_to_check = i; 2837 2838 if (ncoll > 0) 2839 igb_rx_refresh(rxr, i); 2840 } 2841 2842 2843 static void 2844 igb_set_vlan(struct igb_softc *sc) 2845 { 2846 struct e1000_hw *hw = &sc->hw; 2847 uint32_t reg; 2848 #if 0 2849 struct ifnet *ifp = sc->arpcom.ac_if; 2850 #endif 2851 2852 if (sc->vf_ifp) { 2853 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2854 return; 2855 } 2856 2857 reg = E1000_READ_REG(hw, E1000_CTRL); 2858 reg |= E1000_CTRL_VME; 2859 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2860 2861 #if 0 2862 /* Enable the Filter Table */ 2863 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2864 reg = E1000_READ_REG(hw, E1000_RCTL); 2865 reg &= ~E1000_RCTL_CFIEN; 2866 reg |= E1000_RCTL_VFE; 2867 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2868 } 2869 #endif 2870 2871 /* Update the frame size */ 2872 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2873 sc->max_frame_size + VLAN_TAG_SIZE); 2874 2875 #if 0 2876 /* Don't bother with table if no vlans */ 2877 if ((adapter->num_vlans == 0) || 2878 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2879 return; 2880 /* 2881 ** A soft reset zero's out the VFTA, so 2882 ** we need to repopulate it now. 2883 */ 2884 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2885 if (adapter->shadow_vfta[i] != 0) { 2886 if (adapter->vf_ifp) 2887 e1000_vfta_set_vf(hw, 2888 adapter->shadow_vfta[i], TRUE); 2889 else 2890 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2891 i, adapter->shadow_vfta[i]); 2892 } 2893 #endif 2894 } 2895 2896 static void 2897 igb_enable_intr(struct igb_softc *sc) 2898 { 2899 int i; 2900 2901 for (i = 0; i < sc->intr_cnt; ++i) 2902 lwkt_serialize_handler_enable(sc->intr_data[i].intr_serialize); 2903 2904 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2905 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 2906 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask); 2907 else 2908 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2909 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2910 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2911 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2912 } else { 2913 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2914 } 2915 E1000_WRITE_FLUSH(&sc->hw); 2916 } 2917 2918 static void 2919 igb_disable_intr(struct igb_softc *sc) 2920 { 2921 int i; 2922 2923 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2924 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2925 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2926 } 2927 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2928 E1000_WRITE_FLUSH(&sc->hw); 2929 2930 for (i = 0; i < sc->intr_cnt; ++i) 2931 lwkt_serialize_handler_disable(sc->intr_data[i].intr_serialize); 2932 } 2933 2934 /* 2935 * Bit of a misnomer, what this really means is 2936 * to enable OS management of the system... aka 2937 * to disable special hardware management features 2938 */ 2939 static void 2940 igb_get_mgmt(struct igb_softc *sc) 2941 { 2942 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2943 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 2944 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2945 2946 /* disable hardware interception of ARP */ 2947 manc &= ~E1000_MANC_ARP_EN; 2948 2949 /* enable receiving management packets to the host */ 2950 manc |= E1000_MANC_EN_MNG2HOST; 2951 manc2h |= 1 << 5; /* Mng Port 623 */ 2952 manc2h |= 1 << 6; /* Mng Port 664 */ 2953 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 2954 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2955 } 2956 } 2957 2958 /* 2959 * Give control back to hardware management controller 2960 * if there is one. 2961 */ 2962 static void 2963 igb_rel_mgmt(struct igb_softc *sc) 2964 { 2965 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2966 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2967 2968 /* Re-enable hardware interception of ARP */ 2969 manc |= E1000_MANC_ARP_EN; 2970 manc &= ~E1000_MANC_EN_MNG2HOST; 2971 2972 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2973 } 2974 } 2975 2976 /* 2977 * Sets CTRL_EXT:DRV_LOAD bit. 2978 * 2979 * For ASF and Pass Through versions of f/w this means that 2980 * the driver is loaded. 2981 */ 2982 static void 2983 igb_get_hw_control(struct igb_softc *sc) 2984 { 2985 uint32_t ctrl_ext; 2986 2987 if (sc->vf_ifp) 2988 return; 2989 2990 /* Let firmware know the driver has taken over */ 2991 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2992 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2993 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2994 } 2995 2996 /* 2997 * Resets CTRL_EXT:DRV_LOAD bit. 2998 * 2999 * For ASF and Pass Through versions of f/w this means that the 3000 * driver is no longer loaded. 3001 */ 3002 static void 3003 igb_rel_hw_control(struct igb_softc *sc) 3004 { 3005 uint32_t ctrl_ext; 3006 3007 if (sc->vf_ifp) 3008 return; 3009 3010 /* Let firmware taken over control of h/w */ 3011 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3012 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3013 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3014 } 3015 3016 static boolean_t 3017 igb_is_valid_ether_addr(const uint8_t *addr) 3018 { 3019 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3020 3021 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3022 return FALSE; 3023 return TRUE; 3024 } 3025 3026 /* 3027 * Enable PCI Wake On Lan capability 3028 */ 3029 static void 3030 igb_enable_wol(device_t dev) 3031 { 3032 uint16_t cap, status; 3033 uint8_t id; 3034 3035 /* First find the capabilities pointer*/ 3036 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3037 3038 /* Read the PM Capabilities */ 3039 id = pci_read_config(dev, cap, 1); 3040 if (id != PCIY_PMG) /* Something wrong */ 3041 return; 3042 3043 /* 3044 * OK, we have the power capabilities, 3045 * so now get the status register 3046 */ 3047 cap += PCIR_POWER_STATUS; 3048 status = pci_read_config(dev, cap, 2); 3049 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3050 pci_write_config(dev, cap, status, 2); 3051 } 3052 3053 static void 3054 igb_update_stats_counters(struct igb_softc *sc) 3055 { 3056 struct e1000_hw *hw = &sc->hw; 3057 struct e1000_hw_stats *stats; 3058 struct ifnet *ifp = &sc->arpcom.ac_if; 3059 3060 /* 3061 * The virtual function adapter has only a 3062 * small controlled set of stats, do only 3063 * those and return. 3064 */ 3065 if (sc->vf_ifp) { 3066 igb_update_vf_stats_counters(sc); 3067 return; 3068 } 3069 stats = sc->stats; 3070 3071 if (sc->hw.phy.media_type == e1000_media_type_copper || 3072 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 3073 stats->symerrs += 3074 E1000_READ_REG(hw,E1000_SYMERRS); 3075 stats->sec += E1000_READ_REG(hw, E1000_SEC); 3076 } 3077 3078 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 3079 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 3080 stats->scc += E1000_READ_REG(hw, E1000_SCC); 3081 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 3082 3083 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 3084 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 3085 stats->colc += E1000_READ_REG(hw, E1000_COLC); 3086 stats->dc += E1000_READ_REG(hw, E1000_DC); 3087 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 3088 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 3089 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 3090 3091 /* 3092 * For watchdog management we need to know if we have been 3093 * paused during the last interval, so capture that here. 3094 */ 3095 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 3096 stats->xoffrxc += sc->pause_frames; 3097 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 3098 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 3099 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 3100 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 3101 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 3102 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 3103 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 3104 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 3105 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 3106 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 3107 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 3108 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 3109 3110 /* For the 64-bit byte counters the low dword must be read first. */ 3111 /* Both registers clear on the read of the high dword */ 3112 3113 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 3114 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 3115 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 3116 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 3117 3118 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 3119 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 3120 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 3121 stats->roc += E1000_READ_REG(hw, E1000_ROC); 3122 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 3123 3124 stats->mgprc += E1000_READ_REG(hw, E1000_MGTPRC); 3125 stats->mgpdc += E1000_READ_REG(hw, E1000_MGTPDC); 3126 stats->mgptc += E1000_READ_REG(hw, E1000_MGTPTC); 3127 3128 stats->tor += E1000_READ_REG(hw, E1000_TORL) + 3129 ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); 3130 stats->tot += E1000_READ_REG(hw, E1000_TOTL) + 3131 ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); 3132 3133 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 3134 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 3135 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 3136 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 3137 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 3138 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 3139 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 3140 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 3141 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 3142 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 3143 3144 /* Interrupt Counts */ 3145 3146 stats->iac += E1000_READ_REG(hw, E1000_IAC); 3147 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 3148 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 3149 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 3150 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 3151 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 3152 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 3153 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 3154 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 3155 3156 /* Host to Card Statistics */ 3157 3158 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 3159 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 3160 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 3161 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 3162 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 3163 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 3164 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 3165 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 3166 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 3167 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 3168 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 3169 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 3170 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 3171 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 3172 3173 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 3174 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 3175 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 3176 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 3177 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 3178 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 3179 3180 IFNET_STAT_SET(ifp, collisions, stats->colc); 3181 3182 /* Rx Errors */ 3183 IFNET_STAT_SET(ifp, ierrors, 3184 stats->rxerrc + stats->crcerrs + stats->algnerrc + 3185 stats->ruc + stats->roc + stats->mpc + stats->cexterr); 3186 3187 /* Tx Errors */ 3188 IFNET_STAT_SET(ifp, oerrors, 3189 stats->ecol + stats->latecol + sc->watchdog_events); 3190 3191 /* Driver specific counters */ 3192 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 3193 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 3194 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 3195 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 3196 sc->packet_buf_alloc_tx = 3197 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); 3198 sc->packet_buf_alloc_rx = 3199 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 3200 } 3201 3202 static void 3203 igb_vf_init_stats(struct igb_softc *sc) 3204 { 3205 struct e1000_hw *hw = &sc->hw; 3206 struct e1000_vf_stats *stats; 3207 3208 stats = sc->stats; 3209 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 3210 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 3211 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 3212 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 3213 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 3214 } 3215 3216 static void 3217 igb_update_vf_stats_counters(struct igb_softc *sc) 3218 { 3219 struct e1000_hw *hw = &sc->hw; 3220 struct e1000_vf_stats *stats; 3221 3222 if (sc->link_speed == 0) 3223 return; 3224 3225 stats = sc->stats; 3226 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 3227 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 3228 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 3229 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 3230 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 3231 } 3232 3233 #ifdef IFPOLL_ENABLE 3234 3235 static void 3236 igb_npoll_status(struct ifnet *ifp) 3237 { 3238 struct igb_softc *sc = ifp->if_softc; 3239 uint32_t reg_icr; 3240 3241 ASSERT_SERIALIZED(&sc->main_serialize); 3242 3243 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3244 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3245 sc->hw.mac.get_link_status = 1; 3246 igb_update_link_status(sc); 3247 } 3248 } 3249 3250 static void 3251 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 3252 { 3253 struct igb_tx_ring *txr = arg; 3254 3255 ASSERT_SERIALIZED(&txr->tx_serialize); 3256 igb_tx_intr(txr, *(txr->tx_hdr)); 3257 igb_try_txgc(txr, 1); 3258 } 3259 3260 static void 3261 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3262 { 3263 struct igb_rx_ring *rxr = arg; 3264 3265 ASSERT_SERIALIZED(&rxr->rx_serialize); 3266 3267 igb_rxeof(rxr, cycle); 3268 } 3269 3270 static void 3271 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3272 { 3273 struct igb_softc *sc = ifp->if_softc; 3274 int i, txr_cnt, rxr_cnt; 3275 3276 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3277 3278 if (info) { 3279 int cpu; 3280 3281 info->ifpi_status.status_func = igb_npoll_status; 3282 info->ifpi_status.serializer = &sc->main_serialize; 3283 3284 txr_cnt = igb_get_txring_inuse(sc, TRUE); 3285 for (i = 0; i < txr_cnt; ++i) { 3286 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3287 3288 cpu = if_ringmap_cpumap(sc->tx_rmap, i); 3289 KKASSERT(cpu < netisr_ncpus); 3290 info->ifpi_tx[cpu].poll_func = igb_npoll_tx; 3291 info->ifpi_tx[cpu].arg = txr; 3292 info->ifpi_tx[cpu].serializer = &txr->tx_serialize; 3293 ifsq_set_cpuid(txr->ifsq, cpu); 3294 } 3295 3296 rxr_cnt = igb_get_rxring_inuse(sc, TRUE); 3297 for (i = 0; i < rxr_cnt; ++i) { 3298 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3299 3300 cpu = if_ringmap_cpumap(sc->rx_rmap, i); 3301 KKASSERT(cpu < netisr_ncpus); 3302 info->ifpi_rx[cpu].poll_func = igb_npoll_rx; 3303 info->ifpi_rx[cpu].arg = rxr; 3304 info->ifpi_rx[cpu].serializer = &rxr->rx_serialize; 3305 } 3306 } else { 3307 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3308 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3309 3310 ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid); 3311 } 3312 } 3313 if (ifp->if_flags & IFF_RUNNING) 3314 igb_init(sc); 3315 } 3316 3317 #endif /* IFPOLL_ENABLE */ 3318 3319 static void 3320 igb_intr(void *xsc) 3321 { 3322 struct igb_softc *sc = xsc; 3323 struct ifnet *ifp = &sc->arpcom.ac_if; 3324 uint32_t eicr; 3325 3326 ASSERT_SERIALIZED(&sc->main_serialize); 3327 3328 eicr = E1000_READ_REG(&sc->hw, E1000_EICR); 3329 3330 if (eicr == 0) 3331 return; 3332 3333 if (ifp->if_flags & IFF_RUNNING) { 3334 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3335 int i; 3336 3337 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3338 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3339 3340 if (eicr & rxr->rx_intr_mask) { 3341 lwkt_serialize_enter(&rxr->rx_serialize); 3342 igb_rxeof(rxr, -1); 3343 lwkt_serialize_exit(&rxr->rx_serialize); 3344 } 3345 } 3346 3347 if (eicr & txr->tx_intr_mask) { 3348 lwkt_serialize_enter(&txr->tx_serialize); 3349 igb_tx_intr(txr, *(txr->tx_hdr)); 3350 lwkt_serialize_exit(&txr->tx_serialize); 3351 } 3352 } 3353 3354 if (eicr & E1000_EICR_OTHER) { 3355 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3356 3357 /* Link status change */ 3358 if (icr & E1000_ICR_LSC) { 3359 sc->hw.mac.get_link_status = 1; 3360 igb_update_link_status(sc); 3361 } 3362 } 3363 3364 /* 3365 * Reading EICR has the side effect to clear interrupt mask, 3366 * so all interrupts need to be enabled here. 3367 */ 3368 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 3369 } 3370 3371 static void 3372 igb_intr_shared(void *xsc) 3373 { 3374 struct igb_softc *sc = xsc; 3375 struct ifnet *ifp = &sc->arpcom.ac_if; 3376 uint32_t reg_icr; 3377 3378 ASSERT_SERIALIZED(&sc->main_serialize); 3379 3380 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3381 3382 /* Hot eject? */ 3383 if (reg_icr == 0xffffffff) 3384 return; 3385 3386 /* Definitely not our interrupt. */ 3387 if (reg_icr == 0x0) 3388 return; 3389 3390 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) 3391 return; 3392 3393 if (ifp->if_flags & IFF_RUNNING) { 3394 if (reg_icr & 3395 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 3396 int i; 3397 3398 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3399 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3400 3401 lwkt_serialize_enter(&rxr->rx_serialize); 3402 igb_rxeof(rxr, -1); 3403 lwkt_serialize_exit(&rxr->rx_serialize); 3404 } 3405 } 3406 3407 if (reg_icr & E1000_ICR_TXDW) { 3408 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3409 3410 lwkt_serialize_enter(&txr->tx_serialize); 3411 igb_tx_intr(txr, *(txr->tx_hdr)); 3412 lwkt_serialize_exit(&txr->tx_serialize); 3413 } 3414 } 3415 3416 /* Link status change */ 3417 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3418 sc->hw.mac.get_link_status = 1; 3419 igb_update_link_status(sc); 3420 } 3421 3422 if (reg_icr & E1000_ICR_RXO) 3423 sc->rx_overruns++; 3424 } 3425 3426 static int 3427 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp, 3428 int *segs_used, int *idx) 3429 { 3430 bus_dma_segment_t segs[IGB_MAX_SCATTER]; 3431 bus_dmamap_t map; 3432 struct igb_tx_buf *tx_buf, *tx_buf_mapped; 3433 union e1000_adv_tx_desc *txd = NULL; 3434 struct mbuf *m_head = *m_headp; 3435 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0; 3436 int maxsegs, nsegs, i, j, error; 3437 uint32_t hdrlen = 0; 3438 3439 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3440 error = igb_tso_pullup(txr, m_headp); 3441 if (error) 3442 return error; 3443 m_head = *m_headp; 3444 } 3445 3446 /* Set basic descriptor constants */ 3447 cmd_type_len |= E1000_ADVTXD_DTYP_DATA; 3448 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; 3449 if (m_head->m_flags & M_VLANTAG) 3450 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 3451 3452 /* 3453 * Map the packet for DMA. 3454 */ 3455 tx_buf = &txr->tx_buf[txr->next_avail_desc]; 3456 tx_buf_mapped = tx_buf; 3457 map = tx_buf->map; 3458 3459 maxsegs = txr->tx_avail - IGB_TX_RESERVED; 3460 if (maxsegs > IGB_MAX_SCATTER) 3461 maxsegs = IGB_MAX_SCATTER; 3462 3463 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp, 3464 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3465 if (error) { 3466 if (error == ENOBUFS) 3467 txr->sc->mbuf_defrag_failed++; 3468 else 3469 txr->sc->no_tx_dma_setup++; 3470 3471 m_freem(*m_headp); 3472 *m_headp = NULL; 3473 return error; 3474 } 3475 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE); 3476 3477 m_head = *m_headp; 3478 3479 /* 3480 * Set up the TX context descriptor, if any hardware offloading is 3481 * needed. This includes CSUM, VLAN, and TSO. It will consume one 3482 * TX descriptor. 3483 * 3484 * Unlike these chips' predecessors (em/emx), TX context descriptor 3485 * will _not_ interfere TX data fetching pipelining. 3486 */ 3487 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3488 igb_tso_ctx(txr, m_head, &hdrlen); 3489 cmd_type_len |= E1000_ADVTXD_DCMD_TSE; 3490 olinfo_status |= E1000_TXD_POPTS_IXSM << 8; 3491 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3492 txr->tx_nsegs++; 3493 (*segs_used)++; 3494 } else if (igb_txcsum_ctx(txr, m_head)) { 3495 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3496 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8); 3497 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) 3498 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8); 3499 txr->tx_nsegs++; 3500 (*segs_used)++; 3501 } 3502 3503 *segs_used += nsegs; 3504 txr->tx_nsegs += nsegs; 3505 if (txr->tx_nsegs >= txr->intr_nsegs) { 3506 /* 3507 * Report Status (RS) is turned on every intr_nsegs 3508 * descriptors (roughly). 3509 */ 3510 txr->tx_nsegs = 0; 3511 cmd_rs = E1000_ADVTXD_DCMD_RS; 3512 } 3513 3514 /* Calculate payload length */ 3515 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) 3516 << E1000_ADVTXD_PAYLEN_SHIFT); 3517 3518 /* 3519 * 82575 needs the TX context index added; the queue 3520 * index is used as TX context index here. 3521 */ 3522 if (txr->sc->hw.mac.type == e1000_82575) 3523 olinfo_status |= txr->me << 4; 3524 3525 /* Set up our transmit descriptors */ 3526 i = txr->next_avail_desc; 3527 for (j = 0; j < nsegs; j++) { 3528 bus_size_t seg_len; 3529 bus_addr_t seg_addr; 3530 3531 tx_buf = &txr->tx_buf[i]; 3532 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; 3533 seg_addr = segs[j].ds_addr; 3534 seg_len = segs[j].ds_len; 3535 3536 txd->read.buffer_addr = htole64(seg_addr); 3537 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); 3538 txd->read.olinfo_status = htole32(olinfo_status); 3539 if (++i == txr->num_tx_desc) 3540 i = 0; 3541 tx_buf->m_head = NULL; 3542 } 3543 3544 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n")); 3545 txr->next_avail_desc = i; 3546 txr->tx_avail -= nsegs; 3547 txr->tx_nmbuf++; 3548 3549 tx_buf->m_head = m_head; 3550 tx_buf_mapped->map = tx_buf->map; 3551 tx_buf->map = map; 3552 3553 /* 3554 * Last Descriptor of Packet needs End Of Packet (EOP) 3555 */ 3556 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs); 3557 3558 /* 3559 * Defer TDT updating, until enough descrptors are setup 3560 */ 3561 *idx = i; 3562 #ifdef IGB_TSS_DEBUG 3563 ++txr->tx_packets; 3564 #endif 3565 3566 return 0; 3567 } 3568 3569 static void 3570 igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 3571 { 3572 struct igb_softc *sc = ifp->if_softc; 3573 struct igb_tx_ring *txr = ifsq_get_priv(ifsq); 3574 struct mbuf *m_head; 3575 int idx = -1, nsegs = 0; 3576 3577 KKASSERT(txr->ifsq == ifsq); 3578 ASSERT_SERIALIZED(&txr->tx_serialize); 3579 3580 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 3581 return; 3582 3583 if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) { 3584 ifsq_purge(ifsq); 3585 return; 3586 } 3587 3588 while (!ifsq_is_empty(ifsq)) { 3589 if (txr->tx_avail <= IGB_MAX_SCATTER + IGB_TX_RESERVED) { 3590 ifsq_set_oactive(ifsq); 3591 /* Set watchdog on */ 3592 txr->tx_watchdog.wd_timer = 5; 3593 break; 3594 } 3595 3596 m_head = ifsq_dequeue(ifsq); 3597 if (m_head == NULL) 3598 break; 3599 3600 if (igb_encap(txr, &m_head, &nsegs, &idx)) { 3601 IFNET_STAT_INC(ifp, oerrors, 1); 3602 continue; 3603 } 3604 3605 /* 3606 * TX interrupt are aggressively aggregated, so increasing 3607 * opackets at TX interrupt time will make the opackets 3608 * statistics vastly inaccurate; we do the opackets increment 3609 * now. 3610 */ 3611 IFNET_STAT_INC(ifp, opackets, 1); 3612 3613 if (nsegs >= txr->wreg_nsegs) { 3614 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); 3615 idx = -1; 3616 nsegs = 0; 3617 } 3618 3619 /* Send a copy of the frame to the BPF listener */ 3620 ETHER_BPF_MTAP(ifp, m_head); 3621 } 3622 if (idx >= 0) 3623 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); 3624 txr->tx_running = IGB_TX_RUNNING; 3625 } 3626 3627 static void 3628 igb_watchdog(struct ifaltq_subque *ifsq) 3629 { 3630 struct igb_tx_ring *txr = ifsq_get_priv(ifsq); 3631 struct ifnet *ifp = ifsq_get_ifp(ifsq); 3632 struct igb_softc *sc = ifp->if_softc; 3633 int i; 3634 3635 KKASSERT(txr->ifsq == ifsq); 3636 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3637 3638 /* 3639 * If flow control has paused us since last checking 3640 * it invalidates the watchdog timing, so dont run it. 3641 */ 3642 if (sc->pause_frames) { 3643 sc->pause_frames = 0; 3644 txr->tx_watchdog.wd_timer = 5; 3645 return; 3646 } 3647 3648 if_printf(ifp, "Watchdog timeout -- resetting\n"); 3649 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, 3650 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)), 3651 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me))); 3652 if_printf(ifp, "TX(%d) desc avail = %d, " 3653 "Next TX to Clean = %d\n", 3654 txr->me, txr->tx_avail, txr->next_to_clean); 3655 3656 IFNET_STAT_INC(ifp, oerrors, 1); 3657 sc->watchdog_events++; 3658 3659 igb_init(sc); 3660 for (i = 0; i < sc->tx_ring_inuse; ++i) 3661 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 3662 } 3663 3664 static void 3665 igb_set_eitr(struct igb_softc *sc, int idx, int rate) 3666 { 3667 uint32_t eitr = 0; 3668 3669 if (rate > 0) { 3670 if (sc->hw.mac.type == e1000_82575) { 3671 eitr = 1000000000 / 256 / rate; 3672 /* 3673 * NOTE: 3674 * Document is wrong on the 2 bits left shift 3675 */ 3676 } else { 3677 eitr = 1000000 / rate; 3678 eitr <<= IGB_EITR_INTVL_SHIFT; 3679 } 3680 3681 if (eitr == 0) { 3682 /* Don't disable it */ 3683 eitr = 1 << IGB_EITR_INTVL_SHIFT; 3684 } else if (eitr > IGB_EITR_INTVL_MASK) { 3685 /* Don't allow it to be too large */ 3686 eitr = IGB_EITR_INTVL_MASK; 3687 } 3688 } 3689 if (sc->hw.mac.type == e1000_82575) 3690 eitr |= eitr << 16; 3691 else 3692 eitr |= E1000_EITR_CNT_IGNR; 3693 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr); 3694 } 3695 3696 static void 3697 igb_add_intr_rate_sysctl(struct igb_softc *sc, int use, 3698 const char *name, const char *desc) 3699 { 3700 int i; 3701 3702 for (i = 0; i < sc->intr_cnt; ++i) { 3703 if (sc->intr_data[i].intr_use == use) { 3704 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 3705 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), 3706 OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW, 3707 sc, use, igb_sysctl_intr_rate, "I", desc); 3708 break; 3709 } 3710 } 3711 } 3712 3713 static int 3714 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3715 { 3716 struct igb_softc *sc = (void *)arg1; 3717 int use = arg2; 3718 struct ifnet *ifp = &sc->arpcom.ac_if; 3719 int error, rate, i; 3720 struct igb_intr_data *intr; 3721 3722 rate = 0; 3723 for (i = 0; i < sc->intr_cnt; ++i) { 3724 intr = &sc->intr_data[i]; 3725 if (intr->intr_use == use) { 3726 rate = intr->intr_rate; 3727 break; 3728 } 3729 } 3730 3731 error = sysctl_handle_int(oidp, &rate, 0, req); 3732 if (error || req->newptr == NULL) 3733 return error; 3734 if (rate <= 0) 3735 return EINVAL; 3736 3737 ifnet_serialize_all(ifp); 3738 3739 for (i = 0; i < sc->intr_cnt; ++i) { 3740 intr = &sc->intr_data[i]; 3741 if (intr->intr_use == use && intr->intr_rate != rate) { 3742 intr->intr_rate = rate; 3743 if (ifp->if_flags & IFF_RUNNING) 3744 igb_set_eitr(sc, i, rate); 3745 } 3746 } 3747 3748 ifnet_deserialize_all(ifp); 3749 3750 return error; 3751 } 3752 3753 static int 3754 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3755 { 3756 struct igb_softc *sc = (void *)arg1; 3757 struct ifnet *ifp = &sc->arpcom.ac_if; 3758 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3759 int error, nsegs; 3760 3761 nsegs = txr->intr_nsegs; 3762 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3763 if (error || req->newptr == NULL) 3764 return error; 3765 if (nsegs <= 0) 3766 return EINVAL; 3767 3768 ifnet_serialize_all(ifp); 3769 3770 if (nsegs >= txr->num_tx_desc - IGB_MAX_SCATTER - IGB_TX_RESERVED) { 3771 error = EINVAL; 3772 } else { 3773 int i; 3774 3775 error = 0; 3776 for (i = 0; i < sc->tx_ring_cnt; ++i) 3777 sc->tx_rings[i].intr_nsegs = nsegs; 3778 } 3779 3780 ifnet_deserialize_all(ifp); 3781 3782 return error; 3783 } 3784 3785 static int 3786 igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3787 { 3788 struct igb_softc *sc = (void *)arg1; 3789 struct ifnet *ifp = &sc->arpcom.ac_if; 3790 int error, nsegs, i; 3791 3792 nsegs = sc->rx_rings[0].wreg_nsegs; 3793 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3794 if (error || req->newptr == NULL) 3795 return error; 3796 3797 ifnet_serialize_all(ifp); 3798 for (i = 0; i < sc->rx_ring_cnt; ++i) 3799 sc->rx_rings[i].wreg_nsegs = nsegs; 3800 ifnet_deserialize_all(ifp); 3801 3802 return 0; 3803 } 3804 3805 static int 3806 igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3807 { 3808 struct igb_softc *sc = (void *)arg1; 3809 struct ifnet *ifp = &sc->arpcom.ac_if; 3810 int error, nsegs, i; 3811 3812 nsegs = sc->tx_rings[0].wreg_nsegs; 3813 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3814 if (error || req->newptr == NULL) 3815 return error; 3816 3817 ifnet_serialize_all(ifp); 3818 for (i = 0; i < sc->tx_ring_cnt; ++i) 3819 sc->tx_rings[i].wreg_nsegs = nsegs; 3820 ifnet_deserialize_all(ifp); 3821 3822 return 0; 3823 } 3824 3825 static void 3826 igb_init_intr(struct igb_softc *sc) 3827 { 3828 int i; 3829 3830 igb_set_intr_mask(sc); 3831 3832 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) 3833 igb_init_unshared_intr(sc); 3834 3835 for (i = 0; i < sc->intr_cnt; ++i) 3836 igb_set_eitr(sc, i, sc->intr_data[i].intr_rate); 3837 } 3838 3839 static void 3840 igb_init_unshared_intr(struct igb_softc *sc) 3841 { 3842 struct e1000_hw *hw = &sc->hw; 3843 const struct igb_rx_ring *rxr; 3844 const struct igb_tx_ring *txr; 3845 uint32_t ivar, index; 3846 int i; 3847 3848 /* 3849 * Enable extended mode 3850 */ 3851 if (sc->hw.mac.type != e1000_82575) { 3852 uint32_t gpie; 3853 int ivar_max; 3854 3855 gpie = E1000_GPIE_NSICR; 3856 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3857 gpie |= E1000_GPIE_MSIX_MODE | 3858 E1000_GPIE_EIAME | 3859 E1000_GPIE_PBA; 3860 } 3861 E1000_WRITE_REG(hw, E1000_GPIE, gpie); 3862 3863 /* 3864 * Clear IVARs 3865 */ 3866 switch (sc->hw.mac.type) { 3867 case e1000_82576: 3868 ivar_max = IGB_MAX_IVAR_82576; 3869 break; 3870 3871 case e1000_82580: 3872 ivar_max = IGB_MAX_IVAR_82580; 3873 break; 3874 3875 case e1000_i350: 3876 ivar_max = IGB_MAX_IVAR_I350; 3877 break; 3878 3879 case e1000_i354: 3880 ivar_max = IGB_MAX_IVAR_I354; 3881 break; 3882 3883 case e1000_vfadapt: 3884 case e1000_vfadapt_i350: 3885 ivar_max = IGB_MAX_IVAR_VF; 3886 break; 3887 3888 case e1000_i210: 3889 ivar_max = IGB_MAX_IVAR_I210; 3890 break; 3891 3892 case e1000_i211: 3893 ivar_max = IGB_MAX_IVAR_I211; 3894 break; 3895 3896 default: 3897 panic("unknown mac type %d\n", sc->hw.mac.type); 3898 } 3899 for (i = 0; i < ivar_max; ++i) 3900 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 3901 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3902 } else { 3903 uint32_t tmp; 3904 3905 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 3906 ("82575 w/ MSI-X")); 3907 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 3908 tmp |= E1000_CTRL_EXT_IRCA; 3909 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 3910 } 3911 3912 /* 3913 * Map TX/RX interrupts to EICR 3914 */ 3915 switch (sc->hw.mac.type) { 3916 case e1000_82580: 3917 case e1000_i350: 3918 case e1000_i354: 3919 case e1000_vfadapt: 3920 case e1000_vfadapt_i350: 3921 case e1000_i210: 3922 case e1000_i211: 3923 /* RX entries */ 3924 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3925 rxr = &sc->rx_rings[i]; 3926 3927 index = i >> 1; 3928 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3929 3930 if (i & 1) { 3931 ivar &= 0xff00ffff; 3932 ivar |= 3933 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 3934 } else { 3935 ivar &= 0xffffff00; 3936 ivar |= 3937 (rxr->rx_intr_vec | E1000_IVAR_VALID); 3938 } 3939 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3940 } 3941 /* TX entries */ 3942 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3943 txr = &sc->tx_rings[i]; 3944 3945 index = i >> 1; 3946 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3947 3948 if (i & 1) { 3949 ivar &= 0x00ffffff; 3950 ivar |= 3951 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 3952 } else { 3953 ivar &= 0xffff00ff; 3954 ivar |= 3955 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 3956 } 3957 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3958 } 3959 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3960 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 3961 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3962 } 3963 break; 3964 3965 case e1000_82576: 3966 /* RX entries */ 3967 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3968 rxr = &sc->rx_rings[i]; 3969 3970 index = i & 0x7; /* Each IVAR has two entries */ 3971 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3972 3973 if (i < 8) { 3974 ivar &= 0xffffff00; 3975 ivar |= 3976 (rxr->rx_intr_vec | E1000_IVAR_VALID); 3977 } else { 3978 ivar &= 0xff00ffff; 3979 ivar |= 3980 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 3981 } 3982 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3983 } 3984 /* TX entries */ 3985 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3986 txr = &sc->tx_rings[i]; 3987 3988 index = i & 0x7; /* Each IVAR has two entries */ 3989 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3990 3991 if (i < 8) { 3992 ivar &= 0xffff00ff; 3993 ivar |= 3994 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 3995 } else { 3996 ivar &= 0x00ffffff; 3997 ivar |= 3998 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 3999 } 4000 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 4001 } 4002 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 4003 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 4004 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 4005 } 4006 break; 4007 4008 case e1000_82575: 4009 /* 4010 * Enable necessary interrupt bits. 4011 * 4012 * The name of the register is confusing; in addition to 4013 * configuring the first vector of MSI-X, it also configures 4014 * which bits of EICR could be set by the hardware even when 4015 * MSI or line interrupt is used; it thus controls interrupt 4016 * generation. It MUST be configured explicitly; the default 4017 * value mentioned in the datasheet is wrong: RX queue0 and 4018 * TX queue0 are NOT enabled by default. 4019 */ 4020 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask); 4021 break; 4022 4023 default: 4024 panic("unknown mac type %d\n", sc->hw.mac.type); 4025 } 4026 } 4027 4028 static int 4029 igb_setup_intr(struct igb_softc *sc) 4030 { 4031 int i; 4032 4033 for (i = 0; i < sc->intr_cnt; ++i) { 4034 struct igb_intr_data *intr = &sc->intr_data[i]; 4035 int error; 4036 4037 error = bus_setup_intr_descr(sc->dev, intr->intr_res, 4038 INTR_MPSAFE, intr->intr_func, intr->intr_funcarg, 4039 &intr->intr_hand, intr->intr_serialize, intr->intr_desc); 4040 if (error) { 4041 device_printf(sc->dev, "can't setup %dth intr\n", i); 4042 igb_teardown_intr(sc, i); 4043 return error; 4044 } 4045 } 4046 return 0; 4047 } 4048 4049 static void 4050 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_vec0, int intr_vecmax) 4051 { 4052 if (txr->sc->hw.mac.type == e1000_82575) { 4053 txr->tx_intr_vec = 0; /* unused */ 4054 switch (txr->me) { 4055 case 0: 4056 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0; 4057 break; 4058 case 1: 4059 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1; 4060 break; 4061 case 2: 4062 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2; 4063 break; 4064 case 3: 4065 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3; 4066 break; 4067 default: 4068 panic("unsupported # of TX ring, %d\n", txr->me); 4069 } 4070 } else { 4071 int intr_vec = *intr_vec0; 4072 4073 txr->tx_intr_vec = intr_vec % intr_vecmax; 4074 txr->tx_intr_mask = 1 << txr->tx_intr_vec; 4075 4076 *intr_vec0 = intr_vec + 1; 4077 } 4078 } 4079 4080 static void 4081 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_vec0, int intr_vecmax) 4082 { 4083 if (rxr->sc->hw.mac.type == e1000_82575) { 4084 rxr->rx_intr_vec = 0; /* unused */ 4085 switch (rxr->me) { 4086 case 0: 4087 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0; 4088 break; 4089 case 1: 4090 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1; 4091 break; 4092 case 2: 4093 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2; 4094 break; 4095 case 3: 4096 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3; 4097 break; 4098 default: 4099 panic("unsupported # of RX ring, %d\n", rxr->me); 4100 } 4101 } else { 4102 int intr_vec = *intr_vec0; 4103 4104 rxr->rx_intr_vec = intr_vec % intr_vecmax; 4105 rxr->rx_intr_mask = 1 << rxr->rx_intr_vec; 4106 4107 *intr_vec0 = intr_vec + 1; 4108 } 4109 } 4110 4111 static void 4112 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 4113 { 4114 struct igb_softc *sc = ifp->if_softc; 4115 4116 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz); 4117 } 4118 4119 static void 4120 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4121 { 4122 struct igb_softc *sc = ifp->if_softc; 4123 4124 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz); 4125 } 4126 4127 static int 4128 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4129 { 4130 struct igb_softc *sc = ifp->if_softc; 4131 4132 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt, 4133 slz); 4134 } 4135 4136 #ifdef INVARIANTS 4137 4138 static void 4139 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 4140 boolean_t serialized) 4141 { 4142 struct igb_softc *sc = ifp->if_softc; 4143 4144 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt, 4145 slz, serialized); 4146 } 4147 4148 #endif /* INVARIANTS */ 4149 4150 static void 4151 igb_set_intr_mask(struct igb_softc *sc) 4152 { 4153 int i; 4154 4155 sc->intr_mask = sc->sts_intr_mask; 4156 for (i = 0; i < sc->rx_ring_inuse; ++i) 4157 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask; 4158 for (i = 0; i < sc->tx_ring_inuse; ++i) 4159 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask; 4160 if (bootverbose) { 4161 if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n", 4162 sc->intr_mask); 4163 } 4164 } 4165 4166 static int 4167 igb_alloc_intr(struct igb_softc *sc) 4168 { 4169 struct igb_tx_ring *txr; 4170 struct igb_intr_data *intr; 4171 int i, intr_vec, intr_vecmax; 4172 u_int intr_flags; 4173 4174 igb_alloc_msix(sc); 4175 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 4176 igb_set_ring_inuse(sc, FALSE); 4177 goto done; 4178 } 4179 4180 /* 4181 * Reset some settings changed by igb_alloc_msix(). 4182 */ 4183 if (sc->rx_rmap_intr != NULL) { 4184 if_ringmap_free(sc->rx_rmap_intr); 4185 sc->rx_rmap_intr = NULL; 4186 } 4187 if (sc->tx_rmap_intr != NULL) { 4188 if_ringmap_free(sc->tx_rmap_intr); 4189 sc->tx_rmap_intr = NULL; 4190 } 4191 if (sc->intr_data != NULL) { 4192 kfree(sc->intr_data, M_DEVBUF); 4193 sc->intr_data = NULL; 4194 } 4195 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4196 txr = &sc->tx_rings[i]; 4197 txr->tx_intr_vec = 0; 4198 txr->tx_intr_mask = 0; 4199 txr->tx_intr_cpuid = -1; 4200 } 4201 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4202 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4203 4204 rxr->rx_intr_vec = 0; 4205 rxr->rx_intr_mask = 0; 4206 rxr->rx_txr = NULL; 4207 } 4208 4209 sc->intr_cnt = 1; 4210 sc->intr_data = kmalloc(sizeof(struct igb_intr_data), M_DEVBUF, 4211 M_WAITOK | M_ZERO); 4212 intr = &sc->intr_data[0]; 4213 4214 /* 4215 * Allocate MSI/legacy interrupt resource 4216 */ 4217 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable, 4218 &intr->intr_rid, &intr_flags); 4219 4220 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 4221 int unshared; 4222 4223 unshared = device_getenv_int(sc->dev, "irq.unshared", 0); 4224 if (!unshared) { 4225 sc->flags |= IGB_FLAG_SHARED_INTR; 4226 if (bootverbose) 4227 device_printf(sc->dev, "IRQ shared\n"); 4228 } else { 4229 intr_flags &= ~RF_SHAREABLE; 4230 if (bootverbose) 4231 device_printf(sc->dev, "IRQ unshared\n"); 4232 } 4233 } 4234 4235 intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4236 &intr->intr_rid, intr_flags); 4237 if (intr->intr_res == NULL) { 4238 device_printf(sc->dev, "Unable to allocate bus resource: " 4239 "interrupt\n"); 4240 return ENXIO; 4241 } 4242 4243 intr->intr_serialize = &sc->main_serialize; 4244 intr->intr_cpuid = rman_get_cpuid(intr->intr_res); 4245 intr->intr_func = (sc->flags & IGB_FLAG_SHARED_INTR) ? 4246 igb_intr_shared : igb_intr; 4247 intr->intr_funcarg = sc; 4248 intr->intr_rate = IGB_INTR_RATE; 4249 intr->intr_use = IGB_INTR_USE_RXTX; 4250 4251 sc->tx_rings[0].tx_intr_cpuid = intr->intr_cpuid; 4252 4253 /* 4254 * Setup MSI/legacy interrupt mask 4255 */ 4256 switch (sc->hw.mac.type) { 4257 case e1000_82575: 4258 intr_vecmax = IGB_MAX_TXRXINT_82575; 4259 break; 4260 4261 case e1000_82576: 4262 intr_vecmax = IGB_MAX_TXRXINT_82576; 4263 break; 4264 4265 case e1000_82580: 4266 intr_vecmax = IGB_MAX_TXRXINT_82580; 4267 break; 4268 4269 case e1000_i350: 4270 intr_vecmax = IGB_MAX_TXRXINT_I350; 4271 break; 4272 4273 case e1000_i354: 4274 intr_vecmax = IGB_MAX_TXRXINT_I354; 4275 break; 4276 4277 case e1000_i210: 4278 intr_vecmax = IGB_MAX_TXRXINT_I210; 4279 break; 4280 4281 case e1000_i211: 4282 intr_vecmax = IGB_MAX_TXRXINT_I211; 4283 break; 4284 4285 default: 4286 intr_vecmax = IGB_MIN_TXRXINT; 4287 break; 4288 } 4289 intr_vec = 0; 4290 for (i = 0; i < sc->tx_ring_cnt; ++i) 4291 igb_set_txintr_mask(&sc->tx_rings[i], &intr_vec, intr_vecmax); 4292 for (i = 0; i < sc->rx_ring_cnt; ++i) 4293 igb_set_rxintr_mask(&sc->rx_rings[i], &intr_vec, intr_vecmax); 4294 sc->sts_intr_mask = E1000_EICR_OTHER; 4295 4296 igb_set_ring_inuse(sc, FALSE); 4297 KKASSERT(sc->rx_ring_inuse <= IGB_MIN_RING_RSS); 4298 if (sc->rx_ring_inuse == IGB_MIN_RING_RSS) { 4299 /* 4300 * Allocate RX ring map for RSS setup. 4301 */ 4302 sc->rx_rmap_intr = if_ringmap_alloc(sc->dev, 4303 IGB_MIN_RING_RSS, IGB_MIN_RING_RSS); 4304 KASSERT(if_ringmap_count(sc->rx_rmap_intr) == 4305 sc->rx_ring_inuse, ("RX ring inuse mismatch")); 4306 } 4307 done: 4308 igb_set_intr_mask(sc); 4309 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4310 txr = &sc->tx_rings[i]; 4311 if (txr->tx_intr_cpuid < 0) 4312 txr->tx_intr_cpuid = 0; 4313 } 4314 return 0; 4315 } 4316 4317 static void 4318 igb_free_intr(struct igb_softc *sc) 4319 { 4320 if (sc->intr_data == NULL) 4321 return; 4322 4323 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 4324 struct igb_intr_data *intr = &sc->intr_data[0]; 4325 4326 KKASSERT(sc->intr_cnt == 1); 4327 if (intr->intr_res != NULL) { 4328 bus_release_resource(sc->dev, SYS_RES_IRQ, 4329 intr->intr_rid, intr->intr_res); 4330 } 4331 if (sc->intr_type == PCI_INTR_TYPE_MSI) 4332 pci_release_msi(sc->dev); 4333 4334 kfree(sc->intr_data, M_DEVBUF); 4335 } else { 4336 igb_free_msix(sc, TRUE); 4337 } 4338 } 4339 4340 static void 4341 igb_teardown_intr(struct igb_softc *sc, int intr_cnt) 4342 { 4343 int i; 4344 4345 if (sc->intr_data == NULL) 4346 return; 4347 4348 for (i = 0; i < intr_cnt; ++i) { 4349 struct igb_intr_data *intr = &sc->intr_data[i]; 4350 4351 bus_teardown_intr(sc->dev, intr->intr_res, intr->intr_hand); 4352 } 4353 } 4354 4355 static void 4356 igb_alloc_msix(struct igb_softc *sc) 4357 { 4358 int msix_enable, msix_cnt, msix_ring, alloc_cnt; 4359 int i, x, error; 4360 int ring_cnt, ring_cntmax; 4361 struct igb_intr_data *intr; 4362 boolean_t setup = FALSE; 4363 4364 /* 4365 * Don't enable MSI-X on 82575, see: 4366 * 82575 specification update errata #25 4367 */ 4368 if (sc->hw.mac.type == e1000_82575) 4369 return; 4370 4371 /* Don't enable MSI-X on VF */ 4372 if (sc->vf_ifp) 4373 return; 4374 4375 msix_enable = device_getenv_int(sc->dev, "msix.enable", 4376 igb_msix_enable); 4377 if (!msix_enable) 4378 return; 4379 4380 msix_cnt = pci_msix_count(sc->dev); 4381 #ifdef IGB_MSIX_DEBUG 4382 msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt); 4383 #endif 4384 if (msix_cnt <= 1) { 4385 /* One MSI-X model does not make sense. */ 4386 return; 4387 } 4388 if (bootverbose) 4389 device_printf(sc->dev, "MSI-X count %d\n", msix_cnt); 4390 msix_ring = msix_cnt - 1; /* -1 for status */ 4391 4392 /* 4393 * Configure # of RX/TX rings usable by MSI-X. 4394 */ 4395 igb_get_rxring_cnt(sc, &ring_cnt, &ring_cntmax); 4396 if (ring_cntmax > msix_ring) 4397 ring_cntmax = msix_ring; 4398 sc->rx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax); 4399 4400 igb_get_txring_cnt(sc, &ring_cnt, &ring_cntmax); 4401 if (ring_cntmax > msix_ring) 4402 ring_cntmax = msix_ring; 4403 sc->tx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax); 4404 4405 if_ringmap_match(sc->dev, sc->rx_rmap_intr, sc->tx_rmap_intr); 4406 sc->rx_ring_msix = if_ringmap_count(sc->rx_rmap_intr); 4407 KASSERT(sc->rx_ring_msix <= sc->rx_ring_cnt, 4408 ("total RX ring count %d, MSI-X RX ring count %d", 4409 sc->rx_ring_cnt, sc->rx_ring_msix)); 4410 sc->tx_ring_msix = if_ringmap_count(sc->tx_rmap_intr); 4411 KASSERT(sc->tx_ring_msix <= sc->tx_ring_cnt, 4412 ("total TX ring count %d, MSI-X TX ring count %d", 4413 sc->tx_ring_cnt, sc->tx_ring_msix)); 4414 4415 /* 4416 * Aggregate TX/RX MSI-X 4417 */ 4418 ring_cntmax = sc->rx_ring_msix; 4419 if (ring_cntmax < sc->tx_ring_msix) 4420 ring_cntmax = sc->tx_ring_msix; 4421 KASSERT(ring_cntmax <= msix_ring, 4422 ("invalid ring count max %d, MSI-X count for rings %d", 4423 ring_cntmax, msix_ring)); 4424 4425 alloc_cnt = ring_cntmax + 1; /* +1 for status */ 4426 if (bootverbose) { 4427 device_printf(sc->dev, "MSI-X alloc %d, " 4428 "RX ring %d, TX ring %d\n", alloc_cnt, 4429 sc->rx_ring_msix, sc->tx_ring_msix); 4430 } 4431 4432 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR); 4433 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 4434 &sc->msix_mem_rid, RF_ACTIVE); 4435 if (sc->msix_mem_res == NULL) { 4436 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR_ALT); 4437 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 4438 &sc->msix_mem_rid, RF_ACTIVE); 4439 if (sc->msix_mem_res == NULL) { 4440 device_printf(sc->dev, "Unable to map MSI-X table\n"); 4441 return; 4442 } 4443 } 4444 4445 sc->intr_cnt = alloc_cnt; 4446 sc->intr_data = kmalloc(sizeof(struct igb_intr_data) * sc->intr_cnt, 4447 M_DEVBUF, M_WAITOK | M_ZERO); 4448 for (x = 0; x < sc->intr_cnt; ++x) { 4449 intr = &sc->intr_data[x]; 4450 intr->intr_rid = -1; 4451 intr->intr_rate = IGB_INTR_RATE; 4452 } 4453 4454 x = 0; 4455 for (i = 0; i < sc->rx_ring_msix; ++i) { 4456 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4457 struct igb_tx_ring *txr = NULL; 4458 int cpuid, j; 4459 4460 KKASSERT(x < sc->intr_cnt); 4461 rxr->rx_intr_vec = x; 4462 rxr->rx_intr_mask = 1 << rxr->rx_intr_vec; 4463 4464 cpuid = if_ringmap_cpumap(sc->rx_rmap_intr, i); 4465 4466 /* 4467 * Try finding TX ring to piggyback. 4468 */ 4469 for (j = 0; j < sc->tx_ring_msix; ++j) { 4470 if (cpuid == 4471 if_ringmap_cpumap(sc->tx_rmap_intr, j)) { 4472 txr = &sc->tx_rings[j]; 4473 KKASSERT(txr->tx_intr_cpuid < 0); 4474 break; 4475 } 4476 } 4477 rxr->rx_txr = txr; 4478 4479 intr = &sc->intr_data[x++]; 4480 intr->intr_serialize = &rxr->rx_serialize; 4481 intr->intr_cpuid = cpuid; 4482 KKASSERT(intr->intr_cpuid < netisr_ncpus); 4483 intr->intr_funcarg = rxr; 4484 if (txr != NULL) { 4485 intr->intr_func = igb_msix_rxtx; 4486 intr->intr_use = IGB_INTR_USE_RXTX; 4487 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), 4488 "%s rx%dtx%d", device_get_nameunit(sc->dev), 4489 i, txr->me); 4490 4491 txr->tx_intr_vec = rxr->rx_intr_vec; 4492 txr->tx_intr_mask = rxr->rx_intr_mask; 4493 txr->tx_intr_cpuid = intr->intr_cpuid; 4494 } else { 4495 intr->intr_func = igb_msix_rx; 4496 intr->intr_rate = IGB_MSIX_RX_RATE; 4497 intr->intr_use = IGB_INTR_USE_RX; 4498 4499 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), 4500 "%s rx%d", device_get_nameunit(sc->dev), i); 4501 } 4502 intr->intr_desc = intr->intr_desc0; 4503 } 4504 4505 for (i = 0; i < sc->tx_ring_msix; ++i) { 4506 struct igb_tx_ring *txr = &sc->tx_rings[i]; 4507 4508 if (txr->tx_intr_cpuid >= 0) { 4509 /* Piggybacked by RX ring. */ 4510 continue; 4511 } 4512 4513 KKASSERT(x < sc->intr_cnt); 4514 txr->tx_intr_vec = x; 4515 txr->tx_intr_mask = 1 << txr->tx_intr_vec; 4516 4517 intr = &sc->intr_data[x++]; 4518 intr->intr_serialize = &txr->tx_serialize; 4519 intr->intr_func = igb_msix_tx; 4520 intr->intr_funcarg = txr; 4521 intr->intr_rate = IGB_MSIX_TX_RATE; 4522 intr->intr_use = IGB_INTR_USE_TX; 4523 4524 intr->intr_cpuid = if_ringmap_cpumap(sc->tx_rmap_intr, i); 4525 KKASSERT(intr->intr_cpuid < netisr_ncpus); 4526 txr->tx_intr_cpuid = intr->intr_cpuid; 4527 4528 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s tx%d", 4529 device_get_nameunit(sc->dev), i); 4530 intr->intr_desc = intr->intr_desc0; 4531 } 4532 4533 /* 4534 * Link status 4535 */ 4536 KKASSERT(x < sc->intr_cnt); 4537 sc->sts_msix_vec = x; 4538 sc->sts_intr_mask = 1 << sc->sts_msix_vec; 4539 4540 intr = &sc->intr_data[x++]; 4541 intr->intr_serialize = &sc->main_serialize; 4542 intr->intr_func = igb_msix_status; 4543 intr->intr_funcarg = sc; 4544 intr->intr_cpuid = 0; 4545 intr->intr_use = IGB_INTR_USE_STATUS; 4546 4547 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s sts", 4548 device_get_nameunit(sc->dev)); 4549 intr->intr_desc = intr->intr_desc0; 4550 4551 KKASSERT(x == sc->intr_cnt); 4552 4553 error = pci_setup_msix(sc->dev); 4554 if (error) { 4555 device_printf(sc->dev, "Setup MSI-X failed\n"); 4556 goto back; 4557 } 4558 setup = TRUE; 4559 4560 for (i = 0; i < sc->intr_cnt; ++i) { 4561 intr = &sc->intr_data[i]; 4562 4563 error = pci_alloc_msix_vector(sc->dev, i, &intr->intr_rid, 4564 intr->intr_cpuid); 4565 if (error) { 4566 device_printf(sc->dev, 4567 "Unable to allocate MSI-X %d on cpu%d\n", i, 4568 intr->intr_cpuid); 4569 goto back; 4570 } 4571 4572 intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4573 &intr->intr_rid, RF_ACTIVE); 4574 if (intr->intr_res == NULL) { 4575 device_printf(sc->dev, 4576 "Unable to allocate MSI-X %d resource\n", i); 4577 error = ENOMEM; 4578 goto back; 4579 } 4580 } 4581 4582 pci_enable_msix(sc->dev); 4583 sc->intr_type = PCI_INTR_TYPE_MSIX; 4584 back: 4585 if (error) 4586 igb_free_msix(sc, setup); 4587 } 4588 4589 static void 4590 igb_free_msix(struct igb_softc *sc, boolean_t setup) 4591 { 4592 int i; 4593 4594 KKASSERT(sc->intr_cnt > 1); 4595 4596 for (i = 0; i < sc->intr_cnt; ++i) { 4597 struct igb_intr_data *intr = &sc->intr_data[i]; 4598 4599 if (intr->intr_res != NULL) { 4600 bus_release_resource(sc->dev, SYS_RES_IRQ, 4601 intr->intr_rid, intr->intr_res); 4602 } 4603 if (intr->intr_rid >= 0) 4604 pci_release_msix_vector(sc->dev, intr->intr_rid); 4605 } 4606 if (setup) 4607 pci_teardown_msix(sc->dev); 4608 4609 sc->intr_cnt = 0; 4610 kfree(sc->intr_data, M_DEVBUF); 4611 sc->intr_data = NULL; 4612 } 4613 4614 static void 4615 igb_msix_rx(void *arg) 4616 { 4617 struct igb_rx_ring *rxr = arg; 4618 4619 ASSERT_SERIALIZED(&rxr->rx_serialize); 4620 igb_rxeof(rxr, -1); 4621 4622 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask); 4623 } 4624 4625 static void 4626 igb_msix_tx(void *arg) 4627 { 4628 struct igb_tx_ring *txr = arg; 4629 4630 ASSERT_SERIALIZED(&txr->tx_serialize); 4631 4632 igb_tx_intr(txr, *(txr->tx_hdr)); 4633 E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask); 4634 } 4635 4636 static void 4637 igb_msix_status(void *arg) 4638 { 4639 struct igb_softc *sc = arg; 4640 uint32_t icr; 4641 4642 ASSERT_SERIALIZED(&sc->main_serialize); 4643 4644 icr = E1000_READ_REG(&sc->hw, E1000_ICR); 4645 if (icr & E1000_ICR_LSC) { 4646 sc->hw.mac.get_link_status = 1; 4647 igb_update_link_status(sc); 4648 } 4649 4650 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask); 4651 } 4652 4653 static void 4654 igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling) 4655 { 4656 sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling); 4657 sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling); 4658 if (bootverbose) { 4659 if_printf(&sc->arpcom.ac_if, "RX rings %d/%d, TX rings %d/%d\n", 4660 sc->rx_ring_inuse, sc->rx_ring_cnt, 4661 sc->tx_ring_inuse, sc->tx_ring_cnt); 4662 } 4663 } 4664 4665 static int 4666 igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling) 4667 { 4668 if (!IGB_ENABLE_HWRSS(sc)) 4669 return 1; 4670 4671 if (polling) 4672 return sc->rx_ring_cnt; 4673 else if (sc->intr_type != PCI_INTR_TYPE_MSIX) 4674 return IGB_MIN_RING_RSS; 4675 else 4676 return sc->rx_ring_msix; 4677 } 4678 4679 static int 4680 igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling) 4681 { 4682 if (!IGB_ENABLE_HWTSS(sc)) 4683 return 1; 4684 4685 if (polling) 4686 return sc->tx_ring_cnt; 4687 else if (sc->intr_type != PCI_INTR_TYPE_MSIX) 4688 return IGB_MIN_RING; 4689 else 4690 return sc->tx_ring_msix; 4691 } 4692 4693 static int 4694 igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp) 4695 { 4696 int hoff, iphlen, thoff; 4697 struct mbuf *m; 4698 4699 m = *mp; 4700 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 4701 4702 iphlen = m->m_pkthdr.csum_iphlen; 4703 thoff = m->m_pkthdr.csum_thlen; 4704 hoff = m->m_pkthdr.csum_lhlen; 4705 4706 KASSERT(iphlen > 0, ("invalid ip hlen")); 4707 KASSERT(thoff > 0, ("invalid tcp hlen")); 4708 KASSERT(hoff > 0, ("invalid ether hlen")); 4709 4710 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 4711 m = m_pullup(m, hoff + iphlen + thoff); 4712 if (m == NULL) { 4713 *mp = NULL; 4714 return ENOBUFS; 4715 } 4716 *mp = m; 4717 } 4718 if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) { 4719 struct ip *ip; 4720 4721 ip = mtodoff(m, struct ip *, hoff); 4722 ip->ip_len = 0; 4723 } 4724 4725 return 0; 4726 } 4727 4728 static void 4729 igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen) 4730 { 4731 struct e1000_adv_tx_context_desc *TXD; 4732 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 4733 int hoff, ctxd, iphlen, thoff; 4734 4735 iphlen = m->m_pkthdr.csum_iphlen; 4736 thoff = m->m_pkthdr.csum_thlen; 4737 hoff = m->m_pkthdr.csum_lhlen; 4738 4739 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 4740 4741 ctxd = txr->next_avail_desc; 4742 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 4743 4744 if (m->m_flags & M_VLANTAG) { 4745 uint16_t vlantag; 4746 4747 vlantag = htole16(m->m_pkthdr.ether_vlantag); 4748 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 4749 } 4750 4751 vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT); 4752 vlan_macip_lens |= iphlen; 4753 4754 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 4755 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 4756 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 4757 4758 mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT); 4759 mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT); 4760 4761 /* 4762 * 82575 needs the TX context index added; the queue 4763 * index is used as TX context index here. 4764 */ 4765 if (txr->sc->hw.mac.type == e1000_82575) 4766 mss_l4len_idx |= txr->me << 4; 4767 4768 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 4769 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 4770 TXD->seqnum_seed = htole32(0); 4771 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 4772 4773 /* We've consumed the first desc, adjust counters */ 4774 if (++ctxd == txr->num_tx_desc) 4775 ctxd = 0; 4776 txr->next_avail_desc = ctxd; 4777 --txr->tx_avail; 4778 4779 *hlen = hoff + iphlen + thoff; 4780 } 4781 4782 static void 4783 igb_setup_serialize(struct igb_softc *sc) 4784 { 4785 int i = 0, j; 4786 4787 /* Main + RX + TX */ 4788 sc->serialize_cnt = 1 + sc->rx_ring_cnt + sc->tx_ring_cnt; 4789 sc->serializes = 4790 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *), 4791 M_DEVBUF, M_WAITOK | M_ZERO); 4792 4793 /* 4794 * Setup serializes 4795 * 4796 * NOTE: Order is critical 4797 */ 4798 4799 KKASSERT(i < sc->serialize_cnt); 4800 sc->serializes[i++] = &sc->main_serialize; 4801 4802 for (j = 0; j < sc->rx_ring_cnt; ++j) { 4803 KKASSERT(i < sc->serialize_cnt); 4804 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize; 4805 } 4806 4807 for (j = 0; j < sc->tx_ring_cnt; ++j) { 4808 KKASSERT(i < sc->serialize_cnt); 4809 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize; 4810 } 4811 4812 KKASSERT(i == sc->serialize_cnt); 4813 } 4814 4815 static void 4816 igb_msix_rxtx(void *arg) 4817 { 4818 struct igb_rx_ring *rxr = arg; 4819 struct igb_tx_ring *txr; 4820 int hdr; 4821 4822 ASSERT_SERIALIZED(&rxr->rx_serialize); 4823 4824 igb_rxeof(rxr, -1); 4825 4826 /* 4827 * NOTE: 4828 * Since next_to_clean is only changed by igb_txeof(), 4829 * which is called only in interrupt handler, the 4830 * check w/o holding tx serializer is MPSAFE. 4831 */ 4832 txr = rxr->rx_txr; 4833 hdr = *(txr->tx_hdr); 4834 if (hdr != txr->next_to_clean) { 4835 lwkt_serialize_enter(&txr->tx_serialize); 4836 igb_tx_intr(txr, hdr); 4837 lwkt_serialize_exit(&txr->tx_serialize); 4838 } 4839 4840 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask); 4841 } 4842 4843 static void 4844 igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling) 4845 { 4846 if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX) 4847 sc->timer_cpuid = 0; /* XXX fixed */ 4848 else 4849 sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res); 4850 } 4851 4852 static void 4853 igb_init_dmac(struct igb_softc *sc, uint32_t pba) 4854 { 4855 struct e1000_hw *hw = &sc->hw; 4856 uint32_t reg; 4857 4858 if (hw->mac.type == e1000_i211) 4859 return; 4860 4861 if (hw->mac.type > e1000_82580) { 4862 uint32_t dmac; 4863 uint16_t hwm; 4864 4865 if (sc->dma_coalesce == 0) { /* Disabling it */ 4866 reg = ~E1000_DMACR_DMAC_EN; 4867 E1000_WRITE_REG(hw, E1000_DMACR, reg); 4868 return; 4869 } else { 4870 if_printf(&sc->arpcom.ac_if, 4871 "DMA Coalescing enabled\n"); 4872 } 4873 4874 /* Set starting threshold */ 4875 E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); 4876 4877 hwm = 64 * pba - sc->max_frame_size / 16; 4878 if (hwm < 64 * (pba - 6)) 4879 hwm = 64 * (pba - 6); 4880 reg = E1000_READ_REG(hw, E1000_FCRTC); 4881 reg &= ~E1000_FCRTC_RTH_COAL_MASK; 4882 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) 4883 & E1000_FCRTC_RTH_COAL_MASK); 4884 E1000_WRITE_REG(hw, E1000_FCRTC, reg); 4885 4886 dmac = pba - sc->max_frame_size / 512; 4887 if (dmac < pba - 10) 4888 dmac = pba - 10; 4889 reg = E1000_READ_REG(hw, E1000_DMACR); 4890 reg &= ~E1000_DMACR_DMACTHR_MASK; 4891 reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT) 4892 & E1000_DMACR_DMACTHR_MASK); 4893 4894 /* transition to L0x or L1 if available..*/ 4895 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); 4896 4897 /* 4898 * Check if status is 2.5Gb backplane connection 4899 * before configuration of watchdog timer, which 4900 * is in msec values in 12.8usec intervals watchdog 4901 * timer = msec values in 32usec intervals for non 4902 * 2.5Gb connection. 4903 */ 4904 if (hw->mac.type == e1000_i354) { 4905 int status = E1000_READ_REG(hw, E1000_STATUS); 4906 4907 if ((status & E1000_STATUS_2P5_SKU) && 4908 !(status & E1000_STATUS_2P5_SKU_OVER)) 4909 reg |= ((sc->dma_coalesce * 5) >> 6); 4910 else 4911 reg |= (sc->dma_coalesce >> 5); 4912 } else { 4913 reg |= (sc->dma_coalesce >> 5); 4914 } 4915 4916 E1000_WRITE_REG(hw, E1000_DMACR, reg); 4917 4918 E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); 4919 4920 /* Set the interval before transition */ 4921 reg = E1000_READ_REG(hw, E1000_DMCTLX); 4922 if (hw->mac.type == e1000_i350) 4923 reg |= IGB_DMCTLX_DCFLUSH_DIS; 4924 /* 4925 * In 2.5Gb connection, TTLX unit is 0.4 usec, which 4926 * is 0x4*2 = 0xA. But delay is still 4 usec. 4927 */ 4928 if (hw->mac.type == e1000_i354) { 4929 int status = E1000_READ_REG(hw, E1000_STATUS); 4930 4931 if ((status & E1000_STATUS_2P5_SKU) && 4932 !(status & E1000_STATUS_2P5_SKU_OVER)) 4933 reg |= 0xA; 4934 else 4935 reg |= 0x4; 4936 } else { 4937 reg |= 0x4; 4938 } 4939 E1000_WRITE_REG(hw, E1000_DMCTLX, reg); 4940 4941 /* Free space in tx packet buffer to wake from DMA coal */ 4942 E1000_WRITE_REG(hw, E1000_DMCTXTH, 4943 (IGB_TXPBSIZE - (2 * sc->max_frame_size)) >> 6); 4944 4945 /* Make low power state decision controlled by DMA coal */ 4946 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 4947 reg &= ~E1000_PCIEMISC_LX_DECISION; 4948 E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); 4949 } else if (hw->mac.type == e1000_82580) { 4950 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 4951 E1000_WRITE_REG(hw, E1000_PCIEMISC, 4952 reg & ~E1000_PCIEMISC_LX_DECISION); 4953 E1000_WRITE_REG(hw, E1000_DMACR, 0); 4954 } 4955 } 4956