/*
 * Copyright (c) 2001-2013, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_igb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/igb/if_igb.h>

#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */

#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id)	\
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }
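
/*
 * For illustration only (not part of the build): IGB_DEVICE(82576)
 * expands to
 *
 *	{ IGB_VENDOR_ID, E1000_DEV_ID_82576, "Intel(R) PRO/1000 82576" }
 *
 * i.e. the PCI vendor/device ID pair plus a description string built
 * from the token itself.
 */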

static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),
	IGB_DEVICE(I350_VF),
	IGB_DEVICE(I210_COPPER),
	IGB_DEVICE(I210_COPPER_IT),
	IGB_DEVICE(I210_COPPER_OEM1),
	IGB_DEVICE(I210_COPPER_FLASHLESS),
	IGB_DEVICE(I210_SERDES_FLASHLESS),
	IGB_DEVICE(I210_FIBER),
	IGB_DEVICE(I210_SERDES),
	IGB_DEVICE(I210_SGMII),
	IGB_DEVICE(I211_COPPER),
	IGB_DEVICE(I354_BACKPLANE_1GBPS),
	IGB_DEVICE(I354_BACKPLANE_2_5GBPS),
	IGB_DEVICE(I354_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};

static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static void	igb_add_intr_rate_sysctl(struct igb_softc *, int,
		    const char *, const char *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
static int	igb_get_rxring_inuse(const struct igb_softc *, boolean_t);
static int	igb_get_txring_inuse(const struct igb_softc *, boolean_t);
static void	igb_set_timer_cpuid(struct igb_softc *, boolean_t);

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *, boolean_t);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *, boolean_t);
static void	igb_init_dmac(struct igb_softc *, uint32_t);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_get_ring_max(const struct igb_softc *);
static void	igb_get_rxring_cnt(const struct igb_softc *, int *, int *);
static void	igb_get_txring_cnt(const struct igb_softc *, int *, int *);
static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);
static void	igb_setup_serialize(struct igb_softc *);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifaltq_subque *);
static void	igb_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *, int);
static void	igb_txgc(struct igb_tx_ring *);
static void	igb_txgc_timer(void *);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *, int);
static void	igb_alloc_msix(struct igb_softc *);
static void	igb_free_msix(struct igb_softc *, boolean_t);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);
static void	igb_msix_rxtx(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);

static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	DEVMETHOD_END
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_txr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */

static char	igb_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.txr", &igb_txr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_STR("hw.igb.flow_ctrl", igb_flowctrl, sizeof(igb_flowctrl));

/* i350 specific */
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
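
/*
 * Illustrative loader tunables for the knobs above; the values are
 * examples only, and the per-device form (e.g. hw.igb0.rxr) is an
 * assumption based on the device_getenv_*() lookups in igb_attach():
 *
 *	hw.igb.rxd="1024"	# RX descriptors per ring
 *	hw.igb.rxr="4"		# requested RX rings (0 = auto)
 *	hw.igb.msix.enable="1"	# allow MSI-X
 */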

static __inline void
igb_tx_intr(struct igb_tx_ring *txr, int hdr)
{

	igb_txeof(txr, hdr);
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);
}

static __inline void
igb_try_txgc(struct igb_tx_ring *txr, int16_t dec)
{

	if (txr->tx_running > 0) {
		txr->tx_running -= dec;
		if (txr->tx_running <= 0 && txr->tx_nmbuf &&
		    txr->tx_avail < txr->num_tx_desc &&
		    txr->tx_avail + txr->intr_nsegs > txr->num_tx_desc)
			igb_txgc(txr);
	}
}
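
/*
 * Sketch of the GC heuristic above, assuming IGB_TX_RUNNING_DEC
 * (defined in if_igb.h) drains tx_running over several timer ticks
 * once the ring goes quiet.  igb_txgc() is called only when all of
 * the following hold:
 *
 *	tx_running <= 0				ring idle for a while
 *	tx_nmbuf != 0				transmitted mbufs still held
 *	tx_avail < num_tx_desc			descriptors outstanding
 *	tx_avail + intr_nsegs > num_tx_desc	too few pending to ever
 *						raise a TX interrupt
 *
 * i.e. the mbufs would otherwise linger because no TX interrupt is
 * forthcoming to reclaim them.
 */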

static void
igb_txgc_timer(void *xtxr)
{
	struct igb_tx_ring *txr = xtxr;
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP))
		return;

	if (!lwkt_serialize_try(&txr->tx_serialize))
		goto done;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP)) {
		lwkt_serialize_exit(&txr->tx_serialize);
		return;
	}
	igb_try_txgc(txr, IGB_TX_RUNNING_DEC);

	lwkt_serialize_exit(&txr->tx_serialize);
done:
	callout_reset(&txr->tx_gc_timer, 1, igb_txgc_timer, txr);
}

static __inline void
igb_free_txbuf(struct igb_tx_ring *txr, struct igb_tx_buf *txbuf)
{

	KKASSERT(txbuf->m_head != NULL);
	KKASSERT(txr->tx_nmbuf > 0);
	txr->tx_nmbuf--;

	bus_dmamap_unload(txr->tx_tag, txbuf->map);
	m_freem(txbuf->m_head);
	txbuf->m_head = NULL;
}

static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}
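
/*
 * Note on igb_rssinfo() (illustrative, derived from the switch above):
 * only two advanced RX descriptor hash types are accepted --
 * E1000_RXDADV_RSSTYPE_IPV4_TCP unconditionally, and
 * E1000_RXDADV_RSSTYPE_IPV4 only when the hardware validated an L4
 * checksum (TCPCS set, TCPE clear), which this driver treats as UDP.
 * Every other type returns NULL, so the mbuf is dispatched without a
 * Toeplitz hash or RSS packet info.
 */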

static int
igb_get_ring_max(const struct igb_softc *sc)
{

	switch (sc->hw.mac.type) {
	case e1000_82575:
		return (IGB_MAX_RING_82575);

	case e1000_82576:
		return (IGB_MAX_RING_82576);

	case e1000_82580:
		return (IGB_MAX_RING_82580);

	case e1000_i350:
		return (IGB_MAX_RING_I350);

	case e1000_i354:
		return (IGB_MAX_RING_I354);

	case e1000_i210:
		return (IGB_MAX_RING_I210);

	case e1000_i211:
		return (IGB_MAX_RING_I211);

	default:
		return (IGB_MIN_RING);
	}
}

static void
igb_get_rxring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max)
{

	*ring_max = igb_get_ring_max(sc);
	*ring_cnt = device_getenv_int(sc->dev, "rxr", igb_rxr);
}

static void
igb_get_txring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max)
{

	*ring_max = igb_get_ring_max(sc);
	*ring_cnt = device_getenv_int(sc->dev, "txr", igb_txr);
}

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, ring_max, ring_cnt;
	char flowctrl[IFM_ETH_FC_STRLEN];

#ifdef notyet
	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
#endif

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    igb_media_change, igb_media_status);
	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	igb_get_rxring_cnt(sc, &ring_cnt, &ring_max);
	sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max);
	igb_get_txring_cnt(sc, &ring_cnt, &ring_max);
	sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max);
	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);

	sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
	sc->rx_ring_inuse = sc->rx_ring_cnt;
	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);
	sc->tx_ring_inuse = sc->tx_ring_cnt;

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    igb_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);
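
	/*
	 * Illustrative note: ifmedia_str2ethfc() maps the flow control
	 * string onto IFM_ETH_* flag bits.  The compiled-in default is
	 * the IFM_ETH_FC_NONE string; names such as "rxpause", "txpause"
	 * or "full" are assumptions here -- see ifmedia_str2ethfc() for
	 * the authoritative list.
	 */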

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/* Set up serializers */
	igb_setup_serialize(sc);

	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		if (sc->hw.phy.media_type == e1000_media_type_copper) {
			if (sc->hw.mac.type == e1000_i354)
				e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
			else
				e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
		}
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address from it.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (sc->hw.mac.type != e1000_i210 && sc->hw.mac.type != e1000_i211 &&
	    e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree; must be after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc, FALSE);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif

	/*
	 * Disable interrupt to prevent spurious interrupts (line based
	 * interrupt, MSI or even MSI-X), which had been observed on
	 * several types of LOMs, from being handled.
	 */
	igb_disable_intr(sc);

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		igb_teardown_intr(sc, sc->intr_cnt);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);
	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);
	if (sc->rx_rmap != NULL)
		if_ringmap_free(sc->rx_rmap);
	if (sc->rx_rmap_intr != NULL)
		if_ringmap_free(sc->rx_rmap_intr);
	if (sc->tx_rmap != NULL)
		if_ringmap_free(sc->tx_rmap);
	if (sc->tx_rmap_intr != NULL)
		if_ringmap_free(sc->tx_rmap_intr);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
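
/*
 * Worked example for the SIOCSIFCAP case above (illustrative):
 * "ifconfig igb0 -rxcsum" clears IFCAP_RXCSUM in ifr_reqcap, so
 * mask = ifr_reqcap ^ if_capenable has IFCAP_RXCSUM set; the
 * capability is toggled off and reinit forces igb_init(), which
 * reprograms the RX checksum offload state into the hardware.
 */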

static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest MAC address; user can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc, FALSE);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configure the number of RX/TX rings in use */
	igb_set_ring_inuse(sc, polling);
	ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc, polling);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	/* Clear counters */
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		if (sc->hw.mac.type == e1000_i354)
			e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
		else
			e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
	}

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_clr_oactive(txr->ifsq);
		ifsq_watchdog_start(&txr->tx_watchdog);

		if (!polling) {
			callout_reset_bycpu(&txr->tx_gc_timer, 1,
			    igb_txgc_timer, txr, txr->tx_intr_cpuid);
		}
	}

	igb_set_timer_cpuid(sc, polling);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
}

static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;

	case 100:
		/*
		 * Support for 100Mb SFP - these are Fiber
		 * but the media type appears as serdes
		 */
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_100_FX;
		else
			ifmr->ifm_active |= IFM_100_TX;
		break;

	case 1000:
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_1000_SX;
		else
			ifmr->ifm_active |= IFM_1000_T;
		break;

	case 2500:
		ifmr->ifm_active |= IFM_2500_SX;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}

static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		igb_init(sc);

	return 0;
}
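
/*
 * Illustrative mapping for the forced-speed cases above: a command
 * such as "ifconfig igb0 media 100baseTX mediaopt full-duplex"
 * selects the IFM_100_TX case with IFM_FDX set, yielding
 * forced_speed_duplex = ADVERTISE_100_FULL with autonegotiation
 * disabled, while "media autoselect" re-enables autoneg with the
 * default advertisement mask.
 */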

static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	int mcnt = 0;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	if (ifp->if_flags & IFF_ALLMULTI) {
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	} else {
		struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
	}
	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* Fall thru */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			/* Get the flow control for display */
			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));

			if_printf(ifp, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex",
			    flowctrl);
		}
		if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
			e1000_force_flowctrl(hw, sc->ifm_flowctrl);
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* Delay Link Up for Phy update */
		if ((hw->mac.type == e1000_i210 ||
		     hw->mac.type == e1000_i211) &&
		    hw->phy.id == I210_I_PHY_ID)
			msec_delay(IGB_I210_LINK_DELAY);
		/*
		 * Reset if the media type changed.
		 * Support AutoMediaDetect for Marvell M88 PHY in i354.
		 */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = FALSE;
			igb_reset(sc, TRUE);
		}
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_clr_oactive(txr->ifsq);
		ifsq_watchdog_stop(&txr->tx_watchdog);
		txr->tx_flags &= ~IGB_TXFLAG_ENABLED;

		txr->tx_running = 0;
		callout_stop(&txr->tx_gc_timer);
	}

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}

static void
igb_reset(struct igb_softc *sc, boolean_t media_reset)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;

	case e1000_i210:
	case e1000_i211:
		pba = E1000_PBA_34K;
		break;

	default:
		break;
	}
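
	/*
	 * Illustrative note, assuming the usual e1000 definitions
	 * (e.g. E1000_PBA_32K == 32): pba counts KB of packet buffer,
	 * so the watermark math below uses "pba << 10" to convert it
	 * to bytes.
	 */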

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
	fc->requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);
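
	/*
	 * Worked example (illustrative, assuming an i210 where
	 * E1000_PBA_34K == 34 and the default 1518 byte max frame):
	 *
	 *	pba << 10			= 34816 bytes
	 *	(pba << 10) * 9 / 10		= 31334
	 *	(pba << 10) - 2 * 1518		= 31780
	 *	hwm = min(31334, 31780)		= 31334
	 *	high_water = hwm & 0xFFF0	= 31328
	 *	low_water			= 31312
	 */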

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Reset for AutoMediaDetect */
	if (media_reset) {
		e1000_setup_init_funcs(hw, TRUE);
		e1000_get_bus_info(hw);
	}

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	igb_init_dmac(sc, pba);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif

	ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_rings[0].num_rx_desc;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_modulo;
	ifq_set_subq_divisor(&ifp->if_snd, 1);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog);
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
}

static void
igb_add_sysctl(struct igb_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char node[32];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

#define IGB_ADD_INTR_RATE_SYSCTL(sc, use, name) \
do { \
	igb_add_intr_rate_sysctl(sc, IGB_INTR_USE_##use, #name "_intr_rate", \
	    #use " interrupt rate"); \
} while (0)

	IGB_ADD_INTR_RATE_SYSCTL(sc, RXTX, rxtx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, RX, rx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, TX, tx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, STATUS, sts);

#undef IGB_ADD_INTR_RATE_SYSCTL
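
	/*
	 * For reference (illustrative): IGB_ADD_INTR_RATE_SYSCTL(sc,
	 * RXTX, rxtx) above expands to
	 *
	 *	igb_add_intr_rate_sysctl(sc, IGB_INTR_USE_RXTX,
	 *	    "rxtx_intr_rate", "RXTX interrupt rate");
	 *
	 * creating one "<name>_intr_rate" node per interrupt usage type.
	 */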
"tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1704 sc, 0, igb_sysctl_tx_intr_nsegs, "I", 1705 "# of segments per TX interrupt"); 1706 1707 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1708 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1709 sc, 0, igb_sysctl_tx_wreg_nsegs, "I", 1710 "# of segments sent before write to hardware register"); 1711 1712 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1713 OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1714 sc, 0, igb_sysctl_rx_wreg_nsegs, "I", 1715 "# of segments received before write to hardware register"); 1716 1717 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 1718 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1719 OID_AUTO, "tx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1720 sc->tx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I", 1721 "TX MSI-X CPU map"); 1722 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1723 OID_AUTO, "rx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1724 sc->rx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I", 1725 "RX MSI-X CPU map"); 1726 } 1727 #ifdef IFPOLL_ENABLE 1728 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1729 OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1730 sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 1731 "TX polling CPU map"); 1732 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 1733 OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 1734 sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 1735 "RX polling CPU map"); 1736 #endif 1737 1738 #ifdef IGB_RSS_DEBUG 1739 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 1740 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0, 1741 "RSS debug level"); 1742 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1743 ksnprintf(node, sizeof(node), "rx%d_pkt", i); 1744 SYSCTL_ADD_ULONG(ctx, 1745 SYSCTL_CHILDREN(tree), OID_AUTO, node, 1746 CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets"); 1747 } 1748 #endif 1749 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1750 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1751 1752 #ifdef IGB_TSS_DEBUG 1753 ksnprintf(node, sizeof(node), "tx%d_pkt", i); 1754 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node, 1755 CTLFLAG_RW, &txr->tx_packets, "TXed packets"); 1756 #endif 1757 ksnprintf(node, sizeof(node), "tx%d_nmbuf", i); 1758 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node, 1759 CTLFLAG_RD, &txr->tx_nmbuf, 0, "# of pending TX mbufs"); 1760 1761 ksnprintf(node, sizeof(node), "tx%d_gc", i); 1762 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, node, 1763 CTLFLAG_RW, &txr->tx_gc, "# of TX desc GC"); 1764 } 1765 } 1766 1767 static int 1768 igb_alloc_rings(struct igb_softc *sc) 1769 { 1770 int error, i; 1771 1772 /* 1773 * Create top level busdma tag 1774 */ 1775 error = bus_dma_tag_create(NULL, 1, 0, 1776 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1777 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 1778 &sc->parent_tag); 1779 if (error) { 1780 device_printf(sc->dev, "could not create top level DMA tag\n"); 1781 return error; 1782 } 1783 1784 /* 1785 * Allocate TX descriptor rings and buffers 1786 */ 1787 sc->tx_rings = kmalloc_cachealign( 1788 sizeof(struct igb_tx_ring) * sc->tx_ring_cnt, 1789 M_DEVBUF, M_WAITOK | M_ZERO); 1790 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1791 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1792 1793 /* Set up some basics */ 1794 txr->sc = sc; 1795 txr->me = i; 1796 txr->tx_intr_cpuid = -1; 1797 lwkt_serialize_init(&txr->tx_serialize); 1798 callout_init_mp(&txr->tx_gc_timer); 1799 1800 error = igb_create_tx_ring(txr); 1801 if (error) 1802 return error; 1803 } 1804 1805 /* 1806 * Allocate RX 

static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i, ntxd;

	/*
	 * Validate number of transmit descriptors.  It must not exceed
	 * the hardware maximum, and must be a multiple of IGB_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 ||
	    ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, ntxd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = ntxd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	if (txr->sc->hw.mac.type == e1000_82575)
		txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0;

	/*
	 * Initialize various watermarks
	 */
	if (txr->sc->hw.mac.type == e1000_82575) {
		/*
		 * There is no way to GC pending TX mbufs in 'header
		 * write back' mode with a reduced # of RS TX descs,
		 * since TDH does _not_ move for the 82575.
		 */
		txr->intr_nsegs = 1;
	} else {
		txr->intr_nsegs = txr->num_tx_desc / 16;
	}
	txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS;

	return 0;
}
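
/*
 * Sizing note (illustrative): if a ring has, say, 1024 TX descriptors
 * (the IGB_DEFAULT_TXD value lives in if_igb.h), intr_nsegs becomes
 * 1024 / 16 = 64, i.e. status write-back is requested roughly every
 * 64 descriptors.  The 82575 instead uses intr_nsegs = 1, since TDH
 * does not advance in header write-back mode and a reduced RS cadence
 * would leave mbufs unreclaimable.
 */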
txr->num_tx_desc; ++i) {
1928 struct igb_tx_buf *txbuf = &txr->tx_buf[i];
1929
1930 error = bus_dmamap_create(txr->tx_tag,
1931 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
1932 if (error) {
1933 device_printf(txr->sc->dev,
1934 "Unable to create TX DMA map\n");
1935 igb_destroy_tx_ring(txr, i);
1936 return error;
1937 }
1938 }
1939
1940 if (txr->sc->hw.mac.type == e1000_82575)
1941 txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0;
1942
1943 /*
1944 * Initialize various watermarks
1945 */
1946 if (txr->sc->hw.mac.type == e1000_82575) {
1947 /*
1948 * There is no way to GC pending TX mbufs in 'header
1949 * write back' mode with reduced # of RS TX descs,
1950 * since TDH does _not_ move for 82575.
1951 */
1952 txr->intr_nsegs = 1;
1953 } else {
1954 txr->intr_nsegs = txr->num_tx_desc / 16;
1955 }
1956 txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS;
1957
1958 return 0;
1959 }
1960
1961 static void
1962 igb_free_tx_ring(struct igb_tx_ring *txr)
1963 {
1964 int i;
1965
1966 for (i = 0; i < txr->num_tx_desc; ++i) {
1967 struct igb_tx_buf *txbuf = &txr->tx_buf[i];
1968
1969 if (txbuf->m_head != NULL)
1970 igb_free_txbuf(txr, txbuf);
1971 }
1972 }
1973
1974 static void
1975 igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
1976 {
1977 int i;
1978
1979 if (txr->txdma.dma_vaddr != NULL) {
1980 bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
1981 bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
1982 txr->txdma.dma_map);
1983 bus_dma_tag_destroy(txr->txdma.dma_tag);
1984 txr->txdma.dma_vaddr = NULL;
1985 }
1986
1987 if (txr->tx_hdr != NULL) {
1988 bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
1989 bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
1990 txr->tx_hdr_dmap);
1991 bus_dma_tag_destroy(txr->tx_hdr_dtag);
1992 txr->tx_hdr = NULL;
1993 }
1994
1995 if (txr->tx_buf == NULL)
1996 return;
1997
1998 for (i = 0; i < ndesc; ++i) {
1999 struct igb_tx_buf *txbuf = &txr->tx_buf[i];
2000
2001 KKASSERT(txbuf->m_head == NULL);
2002 bus_dmamap_destroy(txr->tx_tag, txbuf->map);
2003 }
2004 bus_dma_tag_destroy(txr->tx_tag);
2005
2006 kfree(txr->tx_buf, M_DEVBUF);
2007 txr->tx_buf = NULL;
2008 }
2009
2010 static void
2011 igb_init_tx_ring(struct igb_tx_ring *txr)
2012 {
2013 /* Clear the old descriptor contents */
2014 bzero(txr->tx_base,
2015 sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);
2016
2017 /* Clear TX head write-back buffer */
2018 *(txr->tx_hdr) = 0;
2019
2020 /* Reset indices */
2021 txr->next_avail_desc = 0;
2022 txr->next_to_clean = 0;
2023 txr->tx_nsegs = 0;
2024 txr->tx_running = 0;
2025 txr->tx_nmbuf = 0;
2026
2027 /* Set number of descriptors available */
2028 txr->tx_avail = txr->num_tx_desc;
2029
2030 /* Enable this TX ring */
2031 txr->tx_flags |= IGB_TXFLAG_ENABLED;
2032 }
2033
2034 static void
2035 igb_init_tx_unit(struct igb_softc *sc)
2036 {
2037 struct e1000_hw *hw = &sc->hw;
2038 uint32_t tctl;
2039 int i;
2040
2041 /* Setup the Tx Descriptor Rings */
2042 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2043 struct igb_tx_ring *txr = &sc->tx_rings[i];
2044 uint64_t bus_addr = txr->txdma.dma_paddr;
2045 uint64_t hdr_paddr = txr->tx_hdr_paddr;
2046 uint32_t txdctl = 0;
2047 uint32_t dca_txctrl;
2048
2049 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2050 txr->num_tx_desc * sizeof(struct e1000_tx_desc));
2051 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2052 (uint32_t)(bus_addr >> 32));
2053 E1000_WRITE_REG(hw, E1000_TDBAL(i),
2054 (uint32_t)bus_addr);
2055
2056 /* Setup the HW Tx Head and Tail descriptor pointers */
2057 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2058 E1000_WRITE_REG(hw,
E1000_TDH(i), 0); 2059 2060 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); 2061 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 2062 E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl); 2063 2064 /* 2065 * Don't set WB_on_EITR: 2066 * - 82575 does not have it 2067 * - It almost has no effect on 82576, see: 2068 * 82576 specification update errata #26 2069 * - It causes unnecessary bus traffic 2070 */ 2071 E1000_WRITE_REG(hw, E1000_TDWBAH(i), 2072 (uint32_t)(hdr_paddr >> 32)); 2073 E1000_WRITE_REG(hw, E1000_TDWBAL(i), 2074 ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE); 2075 2076 /* 2077 * WTHRESH is ignored by the hardware, since header 2078 * write back mode is used. 2079 */ 2080 txdctl |= IGB_TX_PTHRESH; 2081 txdctl |= IGB_TX_HTHRESH << 8; 2082 txdctl |= IGB_TX_WTHRESH << 16; 2083 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 2084 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); 2085 } 2086 2087 if (sc->vf_ifp) 2088 return; 2089 2090 e1000_config_collision_dist(hw); 2091 2092 /* Program the Transmit Control Register */ 2093 tctl = E1000_READ_REG(hw, E1000_TCTL); 2094 tctl &= ~E1000_TCTL_CT; 2095 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2096 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); 2097 2098 /* This write will effectively turn on the transmit unit. */ 2099 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 2100 } 2101 2102 static boolean_t 2103 igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp) 2104 { 2105 struct e1000_adv_tx_context_desc *TXD; 2106 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 2107 int ehdrlen, ctxd, ip_hlen = 0; 2108 boolean_t offload = TRUE; 2109 2110 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0) 2111 offload = FALSE; 2112 2113 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 2114 2115 ctxd = txr->next_avail_desc; 2116 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 2117 2118 /* 2119 * In advanced descriptors the vlan tag must 2120 * be placed into the context descriptor, thus 2121 * we need to be here just for that setup. 2122 */ 2123 if (mp->m_flags & M_VLANTAG) { 2124 uint16_t vlantag; 2125 2126 vlantag = htole16(mp->m_pkthdr.ether_vlantag); 2127 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 2128 } else if (!offload) { 2129 return FALSE; 2130 } 2131 2132 ehdrlen = mp->m_pkthdr.csum_lhlen; 2133 KASSERT(ehdrlen > 0, ("invalid ether hlen")); 2134 2135 /* Set the ether header length */ 2136 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; 2137 if (mp->m_pkthdr.csum_flags & CSUM_IP) { 2138 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 2139 ip_hlen = mp->m_pkthdr.csum_iphlen; 2140 KASSERT(ip_hlen > 0, ("invalid ip hlen")); 2141 } 2142 vlan_macip_lens |= ip_hlen; 2143 2144 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 2145 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 2146 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 2147 else if (mp->m_pkthdr.csum_flags & CSUM_UDP) 2148 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; 2149 2150 /* 2151 * 82575 needs the TX context index added; the queue 2152 * index is used as TX context index here. 
2153 */
2154 if (txr->sc->hw.mac.type == e1000_82575)
2155 mss_l4len_idx = txr->me << 4;
2156
2157 /* Now copy bits into descriptor */
2158 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
2159 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
2160 TXD->seqnum_seed = htole32(0);
2161 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2162
2163 /* We've consumed the first desc, adjust counters */
2164 if (++ctxd == txr->num_tx_desc)
2165 ctxd = 0;
2166 txr->next_avail_desc = ctxd;
2167 --txr->tx_avail;
2168
2169 return offload;
2170 }
2171
2172 static void
2173 igb_txeof(struct igb_tx_ring *txr, int hdr)
2174 {
2175 int first, avail;
2176
2177 if (txr->tx_avail == txr->num_tx_desc)
2178 return;
2179
2180 first = txr->next_to_clean;
2181 if (first == hdr)
2182 return;
2183
2184 avail = txr->tx_avail;
2185 while (first != hdr) {
2186 struct igb_tx_buf *txbuf = &txr->tx_buf[first];
2187
2188 KKASSERT(avail < txr->num_tx_desc);
2189 ++avail;
2190
2191 if (txbuf->m_head)
2192 igb_free_txbuf(txr, txbuf);
2193
2194 if (++first == txr->num_tx_desc)
2195 first = 0;
2196 }
2197 txr->next_to_clean = first;
2198 txr->tx_avail = avail;
2199
2200 /*
2201 * If we have a minimum free, clear OACTIVE
2202 * to tell the stack that it is OK to send packets.
2203 */
2204 if (txr->tx_avail > IGB_MAX_SCATTER + IGB_TX_RESERVED) {
2205 ifsq_clr_oactive(txr->ifsq);
2206
2207 /*
2208 * We have enough TX descriptors, turn off
2209 * the watchdog. We allow a small number of
2210 * packets (roughly intr_nsegs) pending on
2211 * the transmit ring.
2212 */
2213 txr->tx_watchdog.wd_timer = 0;
2214 }
2215 txr->tx_running = IGB_TX_RUNNING;
2216 }
2217
2218 static void
2219 igb_txgc(struct igb_tx_ring *txr)
2220 {
2221 int first, hdr;
2222 #ifdef INVARIANTS
2223 int avail;
2224 #endif
2225
2226 if (txr->tx_avail == txr->num_tx_desc)
2227 return;
2228
2229 hdr = E1000_READ_REG(&txr->sc->hw, E1000_TDH(txr->me));
2230 first = txr->next_to_clean;
2231 if (first == hdr)
2232 goto done;
2233 txr->tx_gc++;
2234
2235 #ifdef INVARIANTS
2236 avail = txr->tx_avail;
2237 #endif
2238 while (first != hdr) {
2239 struct igb_tx_buf *txbuf = &txr->tx_buf[first];
2240
2241 #ifdef INVARIANTS
2242 KKASSERT(avail < txr->num_tx_desc);
2243 ++avail;
2244 #endif
2245 if (txbuf->m_head)
2246 igb_free_txbuf(txr, txbuf);
2247
2248 if (++first == txr->num_tx_desc)
2249 first = 0;
2250 }
2251 done:
2252 if (txr->tx_nmbuf)
2253 txr->tx_running = IGB_TX_RUNNING;
2254 }
2255
2256 static int
2257 igb_create_rx_ring(struct igb_rx_ring *rxr)
2258 {
2259 int rsize, i, error, nrxd;
2260
2261 /*
2262 * Validate number of receive descriptors. It must not exceed
2263 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
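 * For example (illustrative, assuming the usual IGB_DBA_ALIGN of 128
 * and 16-byte descriptors): an "rxd" tunable of 1500 fails the check,
 * since 1500 * 16 is not a multiple of 128, and the driver falls back
 * to IGB_DEFAULT_RXD.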
2264 */
2265 nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd);
2266 if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 ||
2267 nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) {
2268 device_printf(rxr->sc->dev,
2269 "Using %d RX descriptors instead of %d!\n",
2270 IGB_DEFAULT_RXD, nrxd);
2271 rxr->num_rx_desc = IGB_DEFAULT_RXD;
2272 } else {
2273 rxr->num_rx_desc = nrxd;
2274 }
2275
2276 /*
2277 * Allocate RX descriptor ring
2278 */
2279 rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
2280 IGB_DBA_ALIGN);
2281 rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
2282 IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
2283 &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
2284 &rxr->rxdma.dma_paddr);
2285 if (rxr->rxdma.dma_vaddr == NULL) {
2286 device_printf(rxr->sc->dev,
2287 "Unable to allocate RX descriptor memory\n");
2288 return ENOMEM;
2289 }
2290 rxr->rx_base = rxr->rxdma.dma_vaddr;
2291 bzero(rxr->rx_base, rsize);
2292
2293 rsize = __VM_CACHELINE_ALIGN(
2294 sizeof(struct igb_rx_buf) * rxr->num_rx_desc);
2295 rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);
2296
2297 /*
2298 * Create DMA tag for RX buffers
2299 */
2300 error = bus_dma_tag_create(rxr->sc->parent_tag,
2301 1, 0, /* alignment, bounds */
2302 BUS_SPACE_MAXADDR, /* lowaddr */
2303 BUS_SPACE_MAXADDR, /* highaddr */
2304 NULL, NULL, /* filter, filterarg */
2305 MCLBYTES, /* maxsize */
2306 1, /* nsegments */
2307 MCLBYTES, /* maxsegsize */
2308 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
2309 &rxr->rx_tag);
2310 if (error) {
2311 device_printf(rxr->sc->dev,
2312 "Unable to create RX payload DMA tag\n");
2313 kfree(rxr->rx_buf, M_DEVBUF);
2314 rxr->rx_buf = NULL;
2315 return error;
2316 }
2317
2318 /*
2319 * Create spare DMA map for RX buffers
2320 */
2321 error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
2322 &rxr->rx_sparemap);
2323 if (error) {
2324 device_printf(rxr->sc->dev,
2325 "Unable to create spare RX DMA map\n");
2326 bus_dma_tag_destroy(rxr->rx_tag);
2327 kfree(rxr->rx_buf, M_DEVBUF);
2328 rxr->rx_buf = NULL;
2329 return error;
2330 }
2331
2332 /*
2333 * Create DMA maps for RX buffers
2334 */
2335 for (i = 0; i < rxr->num_rx_desc; i++) {
2336 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2337
2338 error = bus_dmamap_create(rxr->rx_tag,
2339 BUS_DMA_WAITOK, &rxbuf->map);
2340 if (error) {
2341 device_printf(rxr->sc->dev,
2342 "Unable to create RX DMA maps\n");
2343 igb_destroy_rx_ring(rxr, i);
2344 return error;
2345 }
2346 }
2347
2348 /*
2349 * Initialize various watermarks
2350 */
2351 rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS;
2352
2353 return 0;
2354 }
2355
2356 static void
2357 igb_free_rx_ring(struct igb_rx_ring *rxr)
2358 {
2359 int i;
2360
2361 for (i = 0; i < rxr->num_rx_desc; ++i) {
2362 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2363
2364 if (rxbuf->m_head != NULL) {
2365 bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
2366 m_freem(rxbuf->m_head);
2367 rxbuf->m_head = NULL;
2368 }
2369 }
2370
2371 if (rxr->fmp != NULL)
2372 m_freem(rxr->fmp);
2373 rxr->fmp = NULL;
2374 rxr->lmp = NULL;
2375 }
2376
2377 static void
2378 igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
2379 {
2380 int i;
2381
2382 if (rxr->rxdma.dma_vaddr != NULL) {
2383 bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
2384 bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
2385 rxr->rxdma.dma_map);
2386 bus_dma_tag_destroy(rxr->rxdma.dma_tag);
2387 rxr->rxdma.dma_vaddr = NULL;
2388 }
2389
2390 if (rxr->rx_buf == NULL)
2391 return;
2392
2393 for (i = 0; i < ndesc;
++i) { 2394 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2395 2396 KKASSERT(rxbuf->m_head == NULL); 2397 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map); 2398 } 2399 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap); 2400 bus_dma_tag_destroy(rxr->rx_tag); 2401 2402 kfree(rxr->rx_buf, M_DEVBUF); 2403 rxr->rx_buf = NULL; 2404 } 2405 2406 static void 2407 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf) 2408 { 2409 rxd->read.pkt_addr = htole64(rxbuf->paddr); 2410 rxd->wb.upper.status_error = 0; 2411 } 2412 2413 static int 2414 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait) 2415 { 2416 struct mbuf *m; 2417 bus_dma_segment_t seg; 2418 bus_dmamap_t map; 2419 struct igb_rx_buf *rxbuf; 2420 int error, nseg; 2421 2422 m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2423 if (m == NULL) { 2424 if (wait) { 2425 if_printf(&rxr->sc->arpcom.ac_if, 2426 "Unable to allocate RX mbuf\n"); 2427 } 2428 return ENOBUFS; 2429 } 2430 m->m_len = m->m_pkthdr.len = MCLBYTES; 2431 2432 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2433 m_adj(m, ETHER_ALIGN); 2434 2435 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2436 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2437 if (error) { 2438 m_freem(m); 2439 if (wait) { 2440 if_printf(&rxr->sc->arpcom.ac_if, 2441 "Unable to load RX mbuf\n"); 2442 } 2443 return error; 2444 } 2445 2446 rxbuf = &rxr->rx_buf[i]; 2447 if (rxbuf->m_head != NULL) 2448 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2449 2450 map = rxbuf->map; 2451 rxbuf->map = rxr->rx_sparemap; 2452 rxr->rx_sparemap = map; 2453 2454 rxbuf->m_head = m; 2455 rxbuf->paddr = seg.ds_addr; 2456 2457 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2458 return 0; 2459 } 2460 2461 static int 2462 igb_init_rx_ring(struct igb_rx_ring *rxr) 2463 { 2464 int i; 2465 2466 /* Clear the ring contents */ 2467 bzero(rxr->rx_base, 2468 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2469 2470 /* Now replenish the ring mbufs */ 2471 for (i = 0; i < rxr->num_rx_desc; ++i) { 2472 int error; 2473 2474 error = igb_newbuf(rxr, i, TRUE); 2475 if (error) 2476 return error; 2477 } 2478 2479 /* Setup our descriptor indices */ 2480 rxr->next_to_check = 0; 2481 2482 rxr->fmp = NULL; 2483 rxr->lmp = NULL; 2484 rxr->discard = FALSE; 2485 2486 return 0; 2487 } 2488 2489 static void 2490 igb_init_rx_unit(struct igb_softc *sc, boolean_t polling) 2491 { 2492 struct ifnet *ifp = &sc->arpcom.ac_if; 2493 struct e1000_hw *hw = &sc->hw; 2494 uint32_t rctl, rxcsum, srrctl = 0; 2495 int i; 2496 2497 /* 2498 * Make sure receives are disabled while setting 2499 * up the descriptor ring 2500 */ 2501 rctl = E1000_READ_REG(hw, E1000_RCTL); 2502 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2503 2504 #if 0 2505 /* 2506 ** Set up for header split 2507 */ 2508 if (igb_header_split) { 2509 /* Use a standard mbuf for the header */ 2510 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2511 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2512 } else 2513 #endif 2514 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2515 2516 /* 2517 ** Set up for jumbo frames 2518 */ 2519 if (ifp->if_mtu > ETHERMTU) { 2520 rctl |= E1000_RCTL_LPE; 2521 #if 0 2522 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { 2523 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2524 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; 2525 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { 2526 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2527 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; 2528 } 2529 /* Set maximum packet len */ 2530 
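/*
 * NB (annotation): this #if 0 path still uses FreeBSD's "adapter"
 * naming; the live #else branch below keeps 2K receive buffers even
 * for jumbo MTUs and lets igb_rxeof() chain clusters (fmp/lmp) into
 * a single packet.
 */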
psize = adapter->max_frame_size; 2531 /* are we on a vlan? */ 2532 if (adapter->ifp->if_vlantrunk != NULL) 2533 psize += VLAN_TAG_SIZE; 2534 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); 2535 #else 2536 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2537 rctl |= E1000_RCTL_SZ_2048; 2538 #endif 2539 } else { 2540 rctl &= ~E1000_RCTL_LPE; 2541 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2542 rctl |= E1000_RCTL_SZ_2048; 2543 } 2544 2545 /* Setup the Base and Length of the Rx Descriptor Rings */ 2546 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2547 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2548 uint64_t bus_addr = rxr->rxdma.dma_paddr; 2549 uint32_t rxdctl; 2550 2551 E1000_WRITE_REG(hw, E1000_RDLEN(i), 2552 rxr->num_rx_desc * sizeof(struct e1000_rx_desc)); 2553 E1000_WRITE_REG(hw, E1000_RDBAH(i), 2554 (uint32_t)(bus_addr >> 32)); 2555 E1000_WRITE_REG(hw, E1000_RDBAL(i), 2556 (uint32_t)bus_addr); 2557 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); 2558 /* Enable this Queue */ 2559 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); 2560 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 2561 rxdctl &= 0xFFF00000; 2562 rxdctl |= IGB_RX_PTHRESH; 2563 rxdctl |= IGB_RX_HTHRESH << 8; 2564 /* 2565 * Don't set WTHRESH to a value above 1 on 82576, see: 2566 * 82576 specification update errata #26 2567 */ 2568 rxdctl |= IGB_RX_WTHRESH << 16; 2569 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); 2570 } 2571 2572 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2573 rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE); 2574 2575 /* 2576 * Receive Checksum Offload for TCP and UDP 2577 * 2578 * Checksum offloading is also enabled if multiple receive 2579 * queue is to be supported, since we need it to figure out 2580 * fragments. 2581 */ 2582 if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) { 2583 /* 2584 * NOTE: 2585 * PCSD must be enabled to enable multiple 2586 * receive queues. 2587 */ 2588 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2589 E1000_RXCSUM_PCSD; 2590 } else { 2591 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2592 E1000_RXCSUM_PCSD); 2593 } 2594 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2595 2596 if (sc->rx_ring_inuse > 1) { 2597 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE]; 2598 const struct if_ringmap *rm; 2599 uint32_t reta_shift; 2600 int j, r; 2601 2602 /* 2603 * NOTE: 2604 * When we reach here, RSS has already been disabled 2605 * in igb_stop(), so we could safely configure RSS key 2606 * and redirect table. 2607 */ 2608 2609 /* 2610 * Configure RSS key 2611 */ 2612 toeplitz_get_key(key, sizeof(key)); 2613 for (i = 0; i < IGB_NRSSRK; ++i) { 2614 uint32_t rssrk; 2615 2616 rssrk = IGB_RSSRK_VAL(key, i); 2617 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2618 2619 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk); 2620 } 2621 2622 /* 2623 * Configure RSS redirect table 2624 */ 2625 if (polling) 2626 rm = sc->rx_rmap; 2627 else 2628 rm = sc->rx_rmap_intr; 2629 if_ringmap_rdrtable(rm, sc->rdr_table, IGB_RDRTABLE_SIZE); 2630 2631 reta_shift = IGB_RETA_SHIFT; 2632 if (hw->mac.type == e1000_82575) 2633 reta_shift = IGB_RETA_SHIFT_82575; 2634 2635 r = 0; 2636 for (j = 0; j < IGB_NRETA; ++j) { 2637 uint32_t reta = 0; 2638 2639 for (i = 0; i < IGB_RETA_SIZE; ++i) { 2640 uint32_t q; 2641 2642 q = sc->rdr_table[r] << reta_shift; 2643 reta |= q << (8 * i); 2644 ++r; 2645 } 2646 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2647 E1000_WRITE_REG(hw, E1000_RETA(j), reta); 2648 } 2649 2650 /* 2651 * Enable multiple receive queues. 2652 * Enable IPv4 RSS standard hash functions. 
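 * (Illustrative: the loop above packs four redirect entries into each
 * 32-bit RETA register, one per byte, each entry being the target
 * queue index shifted left by reta_shift; E1000_MRQC_ENABLE_RSS_4Q
 * below then activates that table.)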
2653 * Disable RSS interrupt on 82575 2654 */ 2655 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2656 E1000_MRQC_ENABLE_RSS_4Q | 2657 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2658 E1000_MRQC_RSS_FIELD_IPV4); 2659 } 2660 2661 /* Setup the Receive Control Register */ 2662 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2663 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2664 E1000_RCTL_RDMTS_HALF | 2665 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2666 /* Strip CRC bytes. */ 2667 rctl |= E1000_RCTL_SECRC; 2668 /* Make sure VLAN Filters are off */ 2669 rctl &= ~E1000_RCTL_VFE; 2670 /* Don't store bad packets */ 2671 rctl &= ~E1000_RCTL_SBP; 2672 2673 /* Enable Receives */ 2674 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2675 2676 /* 2677 * Setup the HW Rx Head and Tail Descriptor Pointers 2678 * - needs to be after enable 2679 */ 2680 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2681 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2682 2683 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2684 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1); 2685 } 2686 } 2687 2688 static void 2689 igb_rx_refresh(struct igb_rx_ring *rxr, int i) 2690 { 2691 if (--i < 0) 2692 i = rxr->num_rx_desc - 1; 2693 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2694 } 2695 2696 static void 2697 igb_rxeof(struct igb_rx_ring *rxr, int count) 2698 { 2699 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2700 union e1000_adv_rx_desc *cur; 2701 uint32_t staterr; 2702 int i, ncoll = 0, cpuid = mycpuid; 2703 2704 i = rxr->next_to_check; 2705 cur = &rxr->rx_base[i]; 2706 staterr = le32toh(cur->wb.upper.status_error); 2707 2708 if ((staterr & E1000_RXD_STAT_DD) == 0) 2709 return; 2710 2711 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2712 struct pktinfo *pi = NULL, pi0; 2713 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2714 struct mbuf *m = NULL; 2715 boolean_t eop; 2716 2717 eop = (staterr & E1000_RXD_STAT_EOP) ? 
TRUE : FALSE; 2718 if (eop) 2719 --count; 2720 2721 ++ncoll; 2722 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2723 !rxr->discard) { 2724 struct mbuf *mp = rxbuf->m_head; 2725 uint32_t hash, hashtype; 2726 uint16_t vlan; 2727 int len; 2728 2729 len = le16toh(cur->wb.upper.length); 2730 if ((rxr->sc->hw.mac.type == e1000_i350 || 2731 rxr->sc->hw.mac.type == e1000_i354) && 2732 (staterr & E1000_RXDEXT_STATERR_LB)) 2733 vlan = be16toh(cur->wb.upper.vlan); 2734 else 2735 vlan = le16toh(cur->wb.upper.vlan); 2736 2737 hash = le32toh(cur->wb.lower.hi_dword.rss); 2738 hashtype = le32toh(cur->wb.lower.lo_dword.data) & 2739 E1000_RXDADV_RSSTYPE_MASK; 2740 2741 IGB_RSS_DPRINTF(rxr->sc, 10, 2742 "ring%d, hash 0x%08x, hashtype %u\n", 2743 rxr->me, hash, hashtype); 2744 2745 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2746 BUS_DMASYNC_POSTREAD); 2747 2748 if (igb_newbuf(rxr, i, FALSE) != 0) { 2749 IFNET_STAT_INC(ifp, iqdrops, 1); 2750 goto discard; 2751 } 2752 2753 mp->m_len = len; 2754 if (rxr->fmp == NULL) { 2755 mp->m_pkthdr.len = len; 2756 rxr->fmp = mp; 2757 rxr->lmp = mp; 2758 } else { 2759 rxr->lmp->m_next = mp; 2760 rxr->lmp = rxr->lmp->m_next; 2761 rxr->fmp->m_pkthdr.len += len; 2762 } 2763 2764 if (eop) { 2765 m = rxr->fmp; 2766 rxr->fmp = NULL; 2767 rxr->lmp = NULL; 2768 2769 m->m_pkthdr.rcvif = ifp; 2770 IFNET_STAT_INC(ifp, ipackets, 1); 2771 2772 if (ifp->if_capenable & IFCAP_RXCSUM) 2773 igb_rxcsum(staterr, m); 2774 2775 if (staterr & E1000_RXD_STAT_VP) { 2776 m->m_pkthdr.ether_vlantag = vlan; 2777 m->m_flags |= M_VLANTAG; 2778 } 2779 2780 if (ifp->if_capenable & IFCAP_RSS) { 2781 pi = igb_rssinfo(m, &pi0, 2782 hash, hashtype, staterr); 2783 } 2784 #ifdef IGB_RSS_DEBUG 2785 rxr->rx_packets++; 2786 #endif 2787 } 2788 } else { 2789 IFNET_STAT_INC(ifp, ierrors, 1); 2790 discard: 2791 igb_setup_rxdesc(cur, rxbuf); 2792 if (!eop) 2793 rxr->discard = TRUE; 2794 else 2795 rxr->discard = FALSE; 2796 if (rxr->fmp != NULL) { 2797 m_freem(rxr->fmp); 2798 rxr->fmp = NULL; 2799 rxr->lmp = NULL; 2800 } 2801 m = NULL; 2802 } 2803 2804 if (m != NULL) 2805 ifp->if_input(ifp, m, pi, cpuid); 2806 2807 /* Advance our pointers to the next descriptor. 
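 * The RDT rewrite is batched: igb_rx_refresh() updates the hardware
 * tail only once every wreg_nsegs processed descriptors, and once more
 * after the loop, to limit MMIO traffic.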
*/ 2808 if (++i == rxr->num_rx_desc) 2809 i = 0; 2810 2811 if (ncoll >= rxr->wreg_nsegs) { 2812 igb_rx_refresh(rxr, i); 2813 ncoll = 0; 2814 } 2815 2816 cur = &rxr->rx_base[i]; 2817 staterr = le32toh(cur->wb.upper.status_error); 2818 } 2819 rxr->next_to_check = i; 2820 2821 if (ncoll > 0) 2822 igb_rx_refresh(rxr, i); 2823 } 2824 2825 2826 static void 2827 igb_set_vlan(struct igb_softc *sc) 2828 { 2829 struct e1000_hw *hw = &sc->hw; 2830 uint32_t reg; 2831 #if 0 2832 struct ifnet *ifp = sc->arpcom.ac_if; 2833 #endif 2834 2835 if (sc->vf_ifp) { 2836 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2837 return; 2838 } 2839 2840 reg = E1000_READ_REG(hw, E1000_CTRL); 2841 reg |= E1000_CTRL_VME; 2842 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2843 2844 #if 0 2845 /* Enable the Filter Table */ 2846 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2847 reg = E1000_READ_REG(hw, E1000_RCTL); 2848 reg &= ~E1000_RCTL_CFIEN; 2849 reg |= E1000_RCTL_VFE; 2850 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2851 } 2852 #endif 2853 2854 /* Update the frame size */ 2855 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2856 sc->max_frame_size + VLAN_TAG_SIZE); 2857 2858 #if 0 2859 /* Don't bother with table if no vlans */ 2860 if ((adapter->num_vlans == 0) || 2861 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2862 return; 2863 /* 2864 ** A soft reset zero's out the VFTA, so 2865 ** we need to repopulate it now. 2866 */ 2867 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2868 if (adapter->shadow_vfta[i] != 0) { 2869 if (adapter->vf_ifp) 2870 e1000_vfta_set_vf(hw, 2871 adapter->shadow_vfta[i], TRUE); 2872 else 2873 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2874 i, adapter->shadow_vfta[i]); 2875 } 2876 #endif 2877 } 2878 2879 static void 2880 igb_enable_intr(struct igb_softc *sc) 2881 { 2882 int i; 2883 2884 for (i = 0; i < sc->intr_cnt; ++i) 2885 lwkt_serialize_handler_enable(sc->intr_data[i].intr_serialize); 2886 2887 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2888 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 2889 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask); 2890 else 2891 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2892 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2893 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2894 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2895 } else { 2896 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2897 } 2898 E1000_WRITE_FLUSH(&sc->hw); 2899 } 2900 2901 static void 2902 igb_disable_intr(struct igb_softc *sc) 2903 { 2904 int i; 2905 2906 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2907 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2908 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2909 } 2910 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2911 E1000_WRITE_FLUSH(&sc->hw); 2912 2913 for (i = 0; i < sc->intr_cnt; ++i) 2914 lwkt_serialize_handler_disable(sc->intr_data[i].intr_serialize); 2915 } 2916 2917 /* 2918 * Bit of a misnomer, what this really means is 2919 * to enable OS management of the system... 
aka
2920 * to disable special hardware management features
2921 */
2922 static void
2923 igb_get_mgmt(struct igb_softc *sc)
2924 {
2925 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2926 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
2927 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2928
2929 /* disable hardware interception of ARP */
2930 manc &= ~E1000_MANC_ARP_EN;
2931
2932 /* enable receiving management packets to the host */
2933 manc |= E1000_MANC_EN_MNG2HOST;
2934 manc2h |= 1 << 5; /* Mng Port 623 */
2935 manc2h |= 1 << 6; /* Mng Port 664 */
2936 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
2937 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2938 }
2939 }
2940
2941 /*
2942 * Give control back to hardware management controller
2943 * if there is one.
2944 */
2945 static void
2946 igb_rel_mgmt(struct igb_softc *sc)
2947 {
2948 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2949 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2950
2951 /* Re-enable hardware interception of ARP */
2952 manc |= E1000_MANC_ARP_EN;
2953 manc &= ~E1000_MANC_EN_MNG2HOST;
2954
2955 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2956 }
2957 }
2958
2959 /*
2960 * Sets CTRL_EXT:DRV_LOAD bit.
2961 *
2962 * For ASF and Pass Through versions of f/w this means that
2963 * the driver is loaded.
2964 */
2965 static void
2966 igb_get_hw_control(struct igb_softc *sc)
2967 {
2968 uint32_t ctrl_ext;
2969
2970 if (sc->vf_ifp)
2971 return;
2972
2973 /* Let firmware know the driver has taken over */
2974 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2975 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2976 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2977 }
2978
2979 /*
2980 * Resets CTRL_EXT:DRV_LOAD bit.
2981 *
2982 * For ASF and Pass Through versions of f/w this means that the
2983 * driver is no longer loaded.
2984 */
2985 static void
2986 igb_rel_hw_control(struct igb_softc *sc)
2987 {
2988 uint32_t ctrl_ext;
2989
2990 if (sc->vf_ifp)
2991 return;
2992
2993 /* Let firmware take over control of h/w */
2994 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2995 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2996 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2997 }
2998
2999 static boolean_t
3000 igb_is_valid_ether_addr(const uint8_t *addr)
3001 {
3002 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3003
3004 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3005 return FALSE;
3006 return TRUE;
3007 }
3008
3009 /*
3010 * Enable PCI Wake On LAN capability
3011 */
3012 static void
3013 igb_enable_wol(device_t dev)
3014 {
3015 uint16_t cap, status;
3016 uint8_t id;
3017
3018 /* First find the capabilities pointer */
3019 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3020
3021 /* Read the PM Capabilities */
3022 id = pci_read_config(dev, cap, 1);
3023 if (id != PCIY_PMG) /* Something wrong */
3024 return;
3025
3026 /*
3027 * OK, we have the power capabilities,
3028 * so now get the status register
3029 */
3030 cap += PCIR_POWER_STATUS;
3031 status = pci_read_config(dev, cap, 2);
3032 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3033 pci_write_config(dev, cap, status, 2);
3034 }
3035
3036 static void
3037 igb_update_stats_counters(struct igb_softc *sc)
3038 {
3039 struct e1000_hw *hw = &sc->hw;
3040 struct e1000_hw_stats *stats;
3041 struct ifnet *ifp = &sc->arpcom.ac_if;
3042
3043 /*
3044 * The virtual function adapter has only a
3045 * small controlled set of stats, do only
3046 * those and return.
3047 */ 3048 if (sc->vf_ifp) { 3049 igb_update_vf_stats_counters(sc); 3050 return; 3051 } 3052 stats = sc->stats; 3053 3054 if (sc->hw.phy.media_type == e1000_media_type_copper || 3055 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 3056 stats->symerrs += 3057 E1000_READ_REG(hw,E1000_SYMERRS); 3058 stats->sec += E1000_READ_REG(hw, E1000_SEC); 3059 } 3060 3061 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 3062 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 3063 stats->scc += E1000_READ_REG(hw, E1000_SCC); 3064 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 3065 3066 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 3067 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 3068 stats->colc += E1000_READ_REG(hw, E1000_COLC); 3069 stats->dc += E1000_READ_REG(hw, E1000_DC); 3070 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 3071 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 3072 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 3073 3074 /* 3075 * For watchdog management we need to know if we have been 3076 * paused during the last interval, so capture that here. 3077 */ 3078 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 3079 stats->xoffrxc += sc->pause_frames; 3080 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 3081 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 3082 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 3083 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 3084 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 3085 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 3086 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 3087 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 3088 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 3089 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 3090 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 3091 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 3092 3093 /* For the 64-bit byte counters the low dword must be read first. 
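 * (reading the low dword presumably latches the high dword so the
 * pair forms one coherent 64-bit sample)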
*/ 3094 /* Both registers clear on the read of the high dword */ 3095 3096 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 3097 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 3098 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 3099 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 3100 3101 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 3102 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 3103 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 3104 stats->roc += E1000_READ_REG(hw, E1000_ROC); 3105 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 3106 3107 stats->tor += E1000_READ_REG(hw, E1000_TORH); 3108 stats->tot += E1000_READ_REG(hw, E1000_TOTH); 3109 3110 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 3111 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 3112 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 3113 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 3114 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 3115 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 3116 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 3117 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 3118 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 3119 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 3120 3121 /* Interrupt Counts */ 3122 3123 stats->iac += E1000_READ_REG(hw, E1000_IAC); 3124 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 3125 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 3126 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 3127 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 3128 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 3129 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 3130 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 3131 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 3132 3133 /* Host to Card Statistics */ 3134 3135 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 3136 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 3137 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 3138 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 3139 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 3140 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 3141 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 3142 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 3143 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 3144 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 3145 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 3146 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 3147 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 3148 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 3149 3150 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 3151 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 3152 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 3153 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 3154 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 3155 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 3156 3157 IFNET_STAT_SET(ifp, collisions, stats->colc); 3158 3159 /* Rx Errors */ 3160 IFNET_STAT_SET(ifp, ierrors, 3161 stats->rxerrc + stats->crcerrs + stats->algnerrc + 3162 stats->ruc + stats->roc + stats->mpc + stats->cexterr); 3163 3164 /* Tx Errors */ 3165 IFNET_STAT_SET(ifp, oerrors, 3166 stats->ecol + stats->latecol + sc->watchdog_events); 3167 3168 /* Driver specific counters */ 3169 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 3170 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 3171 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 3172 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 3173 
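/*
 * Annotation: PBA below reports the on-chip packet buffer split,
 * nominally in KB: TX allocation in the upper 16 bits, RX in the
 * lower 16.
 */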
sc->packet_buf_alloc_tx = 3174 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); 3175 sc->packet_buf_alloc_rx = 3176 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 3177 } 3178 3179 static void 3180 igb_vf_init_stats(struct igb_softc *sc) 3181 { 3182 struct e1000_hw *hw = &sc->hw; 3183 struct e1000_vf_stats *stats; 3184 3185 stats = sc->stats; 3186 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 3187 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 3188 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 3189 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 3190 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 3191 } 3192 3193 static void 3194 igb_update_vf_stats_counters(struct igb_softc *sc) 3195 { 3196 struct e1000_hw *hw = &sc->hw; 3197 struct e1000_vf_stats *stats; 3198 3199 if (sc->link_speed == 0) 3200 return; 3201 3202 stats = sc->stats; 3203 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 3204 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 3205 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 3206 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 3207 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 3208 } 3209 3210 #ifdef IFPOLL_ENABLE 3211 3212 static void 3213 igb_npoll_status(struct ifnet *ifp) 3214 { 3215 struct igb_softc *sc = ifp->if_softc; 3216 uint32_t reg_icr; 3217 3218 ASSERT_SERIALIZED(&sc->main_serialize); 3219 3220 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3221 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3222 sc->hw.mac.get_link_status = 1; 3223 igb_update_link_status(sc); 3224 } 3225 } 3226 3227 static void 3228 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 3229 { 3230 struct igb_tx_ring *txr = arg; 3231 3232 ASSERT_SERIALIZED(&txr->tx_serialize); 3233 igb_tx_intr(txr, *(txr->tx_hdr)); 3234 igb_try_txgc(txr, 1); 3235 } 3236 3237 static void 3238 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3239 { 3240 struct igb_rx_ring *rxr = arg; 3241 3242 ASSERT_SERIALIZED(&rxr->rx_serialize); 3243 3244 igb_rxeof(rxr, cycle); 3245 } 3246 3247 static void 3248 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3249 { 3250 struct igb_softc *sc = ifp->if_softc; 3251 int i, txr_cnt, rxr_cnt; 3252 3253 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3254 3255 if (info) { 3256 int cpu; 3257 3258 info->ifpi_status.status_func = igb_npoll_status; 3259 info->ifpi_status.serializer = &sc->main_serialize; 3260 3261 txr_cnt = igb_get_txring_inuse(sc, TRUE); 3262 for (i = 0; i < txr_cnt; ++i) { 3263 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3264 3265 cpu = if_ringmap_cpumap(sc->tx_rmap, i); 3266 KKASSERT(cpu < netisr_ncpus); 3267 info->ifpi_tx[cpu].poll_func = igb_npoll_tx; 3268 info->ifpi_tx[cpu].arg = txr; 3269 info->ifpi_tx[cpu].serializer = &txr->tx_serialize; 3270 ifsq_set_cpuid(txr->ifsq, cpu); 3271 } 3272 3273 rxr_cnt = igb_get_rxring_inuse(sc, TRUE); 3274 for (i = 0; i < rxr_cnt; ++i) { 3275 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3276 3277 cpu = if_ringmap_cpumap(sc->rx_rmap, i); 3278 KKASSERT(cpu < netisr_ncpus); 3279 info->ifpi_rx[cpu].poll_func = igb_npoll_rx; 3280 info->ifpi_rx[cpu].arg = rxr; 3281 info->ifpi_rx[cpu].serializer = &rxr->rx_serialize; 3282 } 3283 } else { 3284 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3285 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3286 3287 ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid); 3288 } 3289 } 3290 if (ifp->if_flags & IFF_RUNNING) 3291 igb_init(sc); 3292 } 3293 3294 #endif /* IFPOLL_ENABLE */ 3295 3296 static void 3297 
igb_intr(void *xsc)
3298 {
3299 struct igb_softc *sc = xsc;
3300 struct ifnet *ifp = &sc->arpcom.ac_if;
3301 uint32_t eicr;
3302
3303 ASSERT_SERIALIZED(&sc->main_serialize);
3304
3305 eicr = E1000_READ_REG(&sc->hw, E1000_EICR);
3306
3307 if (eicr == 0)
3308 return;
3309
3310 if (ifp->if_flags & IFF_RUNNING) {
3311 struct igb_tx_ring *txr = &sc->tx_rings[0];
3312 int i;
3313
3314 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3315 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3316
3317 if (eicr & rxr->rx_intr_mask) {
3318 lwkt_serialize_enter(&rxr->rx_serialize);
3319 igb_rxeof(rxr, -1);
3320 lwkt_serialize_exit(&rxr->rx_serialize);
3321 }
3322 }
3323
3324 if (eicr & txr->tx_intr_mask) {
3325 lwkt_serialize_enter(&txr->tx_serialize);
3326 igb_tx_intr(txr, *(txr->tx_hdr));
3327 lwkt_serialize_exit(&txr->tx_serialize);
3328 }
3329 }
3330
3331 if (eicr & E1000_EICR_OTHER) {
3332 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);
3333
3334 /* Link status change */
3335 if (icr & E1000_ICR_LSC) {
3336 sc->hw.mac.get_link_status = 1;
3337 igb_update_link_status(sc);
3338 }
3339 }
3340
3341 /*
3342 * Reading EICR has the side effect of clearing the interrupt
3343 * mask, so all interrupts need to be enabled here.
3344 */
3345 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
3346 }
3347
3348 static void
3349 igb_intr_shared(void *xsc)
3350 {
3351 struct igb_softc *sc = xsc;
3352 struct ifnet *ifp = &sc->arpcom.ac_if;
3353 uint32_t reg_icr;
3354
3355 ASSERT_SERIALIZED(&sc->main_serialize);
3356
3357 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
3358
3359 /* Hot eject? */
3360 if (reg_icr == 0xffffffff)
3361 return;
3362
3363 /* Definitely not our interrupt. */
3364 if (reg_icr == 0x0)
3365 return;
3366
3367 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
3368 return;
3369
3370 if (ifp->if_flags & IFF_RUNNING) {
3371 if (reg_icr &
3372 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
3373 int i;
3374
3375 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3376 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3377
3378 lwkt_serialize_enter(&rxr->rx_serialize);
3379 igb_rxeof(rxr, -1);
3380 lwkt_serialize_exit(&rxr->rx_serialize);
3381 }
3382 }
3383
3384 if (reg_icr & E1000_ICR_TXDW) {
3385 struct igb_tx_ring *txr = &sc->tx_rings[0];
3386
3387 lwkt_serialize_enter(&txr->tx_serialize);
3388 igb_tx_intr(txr, *(txr->tx_hdr));
3389 lwkt_serialize_exit(&txr->tx_serialize);
3390 }
3391 }
3392
3393 /* Link status change */
3394 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3395 sc->hw.mac.get_link_status = 1;
3396 igb_update_link_status(sc);
3397 }
3398
3399 if (reg_icr & E1000_ICR_RXO)
3400 sc->rx_overruns++;
3401 }
3402
3403 static int
3404 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp,
3405 int *segs_used, int *idx)
3406 {
3407 bus_dma_segment_t segs[IGB_MAX_SCATTER];
3408 bus_dmamap_t map;
3409 struct igb_tx_buf *tx_buf, *tx_buf_mapped;
3410 union e1000_adv_tx_desc *txd = NULL;
3411 struct mbuf *m_head = *m_headp;
3412 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
3413 int maxsegs, nsegs, i, j, error;
3414 uint32_t hdrlen = 0;
3415
3416 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3417 error = igb_tso_pullup(txr, m_headp);
3418 if (error)
3419 return error;
3420 m_head = *m_headp;
3421 }
3422
3423 /* Set basic descriptor constants */
3424 cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
3425 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
3426 if (m_head->m_flags & M_VLANTAG)
3427 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3428
3429 /*
3430 * Map the packet for DMA.
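 * bus_dmamap_load_mbuf_defrag() may defragment, and thereby replace,
 * the mbuf chain to fit within maxsegs; m_head is re-read from
 * *m_headp afterwards for exactly that reason.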
3431 */
3432 tx_buf = &txr->tx_buf[txr->next_avail_desc];
3433 tx_buf_mapped = tx_buf;
3434 map = tx_buf->map;
3435
3436 maxsegs = txr->tx_avail - IGB_TX_RESERVED;
3437 if (maxsegs > IGB_MAX_SCATTER)
3438 maxsegs = IGB_MAX_SCATTER;
3439
3440 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
3441 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3442 if (error) {
3443 if (error == ENOBUFS)
3444 txr->sc->mbuf_defrag_failed++;
3445 else
3446 txr->sc->no_tx_dma_setup++;
3447
3448 m_freem(*m_headp);
3449 *m_headp = NULL;
3450 return error;
3451 }
3452 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);
3453
3454 m_head = *m_headp;
3455
3456 /*
3457 * Set up the TX context descriptor, if any hardware offloading is
3458 * needed. This includes CSUM, VLAN, and TSO. It will consume one
3459 * TX descriptor.
3460 *
3461 * Unlike these chips' predecessors (em/emx), the TX context
3462 * descriptor will _not_ interfere with TX data fetch pipelining.
3463 */
3464 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3465 igb_tso_ctx(txr, m_head, &hdrlen);
3466 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3467 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3468 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3469 txr->tx_nsegs++;
3470 (*segs_used)++;
3471 } else if (igb_txcsum_ctx(txr, m_head)) {
3472 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3473 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
3474 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
3475 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
3476 txr->tx_nsegs++;
3477 (*segs_used)++;
3478 }
3479
3480 *segs_used += nsegs;
3481 txr->tx_nsegs += nsegs;
3482 if (txr->tx_nsegs >= txr->intr_nsegs) {
3483 /*
3484 * Report Status (RS) is turned on every intr_nsegs
3485 * descriptors (roughly).
3486 */
3487 txr->tx_nsegs = 0;
3488 cmd_rs = E1000_ADVTXD_DCMD_RS;
3489 }
3490
3491 /* Calculate payload length */
3492 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
3493 << E1000_ADVTXD_PAYLEN_SHIFT);
3494
3495 /*
3496 * 82575 needs the TX context index added; the queue
3497 * index is used as TX context index here.
3498 */
3499 if (txr->sc->hw.mac.type == e1000_82575)
3500 olinfo_status |= txr->me << 4;
3501
3502 /* Set up our transmit descriptors */
3503 i = txr->next_avail_desc;
3504 for (j = 0; j < nsegs; j++) {
3505 bus_size_t seg_len;
3506 bus_addr_t seg_addr;
3507
3508 tx_buf = &txr->tx_buf[i];
3509 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
3510 seg_addr = segs[j].ds_addr;
3511 seg_len = segs[j].ds_len;
3512
3513 txd->read.buffer_addr = htole64(seg_addr);
3514 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
3515 txd->read.olinfo_status = htole32(olinfo_status);
3516 if (++i == txr->num_tx_desc)
3517 i = 0;
3518 tx_buf->m_head = NULL;
3519 }
3520
3521 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
3522 txr->next_avail_desc = i;
3523 txr->tx_avail -= nsegs;
3524 txr->tx_nmbuf++;
3525
3526 tx_buf->m_head = m_head;
3527 tx_buf_mapped->map = tx_buf->map;
3528 tx_buf->map = map;
3529
3530 /*
3531 * Last Descriptor of Packet needs End Of Packet (EOP)
3532 */
3533 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);
3534
3535 /*
3536 * Defer TDT updating until enough descriptors are set up
3537 */
3538 *idx = i;
3539 #ifdef IGB_TSS_DEBUG
3540 ++txr->tx_packets;
3541 #endif
3542
3543 return 0;
3544 }
3545
3546 static void
3547 igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
3548 {
3549 struct igb_softc *sc = ifp->if_softc;
3550 struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
3551 struct mbuf *m_head;
3552 int idx = -1, nsegs = 0;
3553
3554 KKASSERT(txr->ifsq == ifsq);
3555 ASSERT_SERIALIZED(&txr->tx_serialize);
3556
3557 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
3558 return;
3559
3560 if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) {
3561 ifsq_purge(ifsq);
3562 return;
3563 }
3564
3565 while (!ifsq_is_empty(ifsq)) {
3566 if (txr->tx_avail <= IGB_MAX_SCATTER + IGB_TX_RESERVED) {
3567 ifsq_set_oactive(ifsq);
3568 /* Set watchdog on */
3569 txr->tx_watchdog.wd_timer = 5;
3570 break;
3571 }
3572
3573 m_head = ifsq_dequeue(ifsq);
3574 if (m_head == NULL)
3575 break;
3576
3577 if (igb_encap(txr, &m_head, &nsegs, &idx)) {
3578 IFNET_STAT_INC(ifp, oerrors, 1);
3579 continue;
3580 }
3581
3582 /*
3583 * TX interrupts are aggressively aggregated, so increasing
3584 * opackets at TX interrupt time will make the opackets
3585 * statistics vastly inaccurate; we do the opackets increment
3586 * now.
3587 */
3588 IFNET_STAT_INC(ifp, opackets, 1);
3589
3590 if (nsegs >= txr->wreg_nsegs) {
3591 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
3592 idx = -1;
3593 nsegs = 0;
3594 }
3595
3596 /* Send a copy of the frame to the BPF listener */
3597 ETHER_BPF_MTAP(ifp, m_head);
3598 }
3599 if (idx >= 0)
3600 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
3601 txr->tx_running = IGB_TX_RUNNING;
3602 }
3603
3604 static void
3605 igb_watchdog(struct ifaltq_subque *ifsq)
3606 {
3607 struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
3608 struct ifnet *ifp = ifsq_get_ifp(ifsq);
3609 struct igb_softc *sc = ifp->if_softc;
3610 int i;
3611
3612 KKASSERT(txr->ifsq == ifsq);
3613 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3614
3615 /*
3616 * If flow control has paused us since last checking
3617 * it invalidates the watchdog timing, so don't run it.
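 * (pause_frames is latched from the XOFFRXC counter in
 * igb_update_stats_counters().)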
3618 */ 3619 if (sc->pause_frames) { 3620 sc->pause_frames = 0; 3621 txr->tx_watchdog.wd_timer = 5; 3622 return; 3623 } 3624 3625 if_printf(ifp, "Watchdog timeout -- resetting\n"); 3626 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, 3627 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)), 3628 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me))); 3629 if_printf(ifp, "TX(%d) desc avail = %d, " 3630 "Next TX to Clean = %d\n", 3631 txr->me, txr->tx_avail, txr->next_to_clean); 3632 3633 IFNET_STAT_INC(ifp, oerrors, 1); 3634 sc->watchdog_events++; 3635 3636 igb_init(sc); 3637 for (i = 0; i < sc->tx_ring_inuse; ++i) 3638 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 3639 } 3640 3641 static void 3642 igb_set_eitr(struct igb_softc *sc, int idx, int rate) 3643 { 3644 uint32_t eitr = 0; 3645 3646 if (rate > 0) { 3647 if (sc->hw.mac.type == e1000_82575) { 3648 eitr = 1000000000 / 256 / rate; 3649 /* 3650 * NOTE: 3651 * Document is wrong on the 2 bits left shift 3652 */ 3653 } else { 3654 eitr = 1000000 / rate; 3655 eitr <<= IGB_EITR_INTVL_SHIFT; 3656 } 3657 3658 if (eitr == 0) { 3659 /* Don't disable it */ 3660 eitr = 1 << IGB_EITR_INTVL_SHIFT; 3661 } else if (eitr > IGB_EITR_INTVL_MASK) { 3662 /* Don't allow it to be too large */ 3663 eitr = IGB_EITR_INTVL_MASK; 3664 } 3665 } 3666 if (sc->hw.mac.type == e1000_82575) 3667 eitr |= eitr << 16; 3668 else 3669 eitr |= E1000_EITR_CNT_IGNR; 3670 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr); 3671 } 3672 3673 static void 3674 igb_add_intr_rate_sysctl(struct igb_softc *sc, int use, 3675 const char *name, const char *desc) 3676 { 3677 int i; 3678 3679 for (i = 0; i < sc->intr_cnt; ++i) { 3680 if (sc->intr_data[i].intr_use == use) { 3681 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 3682 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), 3683 OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW, 3684 sc, use, igb_sysctl_intr_rate, "I", desc); 3685 break; 3686 } 3687 } 3688 } 3689 3690 static int 3691 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3692 { 3693 struct igb_softc *sc = (void *)arg1; 3694 int use = arg2; 3695 struct ifnet *ifp = &sc->arpcom.ac_if; 3696 int error, rate, i; 3697 struct igb_intr_data *intr; 3698 3699 rate = 0; 3700 for (i = 0; i < sc->intr_cnt; ++i) { 3701 intr = &sc->intr_data[i]; 3702 if (intr->intr_use == use) { 3703 rate = intr->intr_rate; 3704 break; 3705 } 3706 } 3707 3708 error = sysctl_handle_int(oidp, &rate, 0, req); 3709 if (error || req->newptr == NULL) 3710 return error; 3711 if (rate <= 0) 3712 return EINVAL; 3713 3714 ifnet_serialize_all(ifp); 3715 3716 for (i = 0; i < sc->intr_cnt; ++i) { 3717 intr = &sc->intr_data[i]; 3718 if (intr->intr_use == use && intr->intr_rate != rate) { 3719 intr->intr_rate = rate; 3720 if (ifp->if_flags & IFF_RUNNING) 3721 igb_set_eitr(sc, i, rate); 3722 } 3723 } 3724 3725 ifnet_deserialize_all(ifp); 3726 3727 return error; 3728 } 3729 3730 static int 3731 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3732 { 3733 struct igb_softc *sc = (void *)arg1; 3734 struct ifnet *ifp = &sc->arpcom.ac_if; 3735 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3736 int error, nsegs; 3737 3738 nsegs = txr->intr_nsegs; 3739 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3740 if (error || req->newptr == NULL) 3741 return error; 3742 if (nsegs <= 0) 3743 return EINVAL; 3744 3745 ifnet_serialize_all(ifp); 3746 3747 if (nsegs >= txr->num_tx_desc - IGB_MAX_SCATTER - IGB_TX_RESERVED) { 3748 error = EINVAL; 3749 } else { 3750 int i; 3751 3752 error = 0; 3753 for (i = 0; i < sc->tx_ring_cnt; ++i) 3754 sc->tx_rings[i].intr_nsegs = nsegs; 
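/*
 * Illustrative arithmetic: intr_nsegs bounds how many descriptors
 * igb_encap() queues between Report Status requests; with the default
 * of num_tx_desc / 16, a 1024-descriptor ring requests TX status
 * roughly every 64 descriptors.
 */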
3755 } 3756 3757 ifnet_deserialize_all(ifp); 3758 3759 return error; 3760 } 3761 3762 static int 3763 igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3764 { 3765 struct igb_softc *sc = (void *)arg1; 3766 struct ifnet *ifp = &sc->arpcom.ac_if; 3767 int error, nsegs, i; 3768 3769 nsegs = sc->rx_rings[0].wreg_nsegs; 3770 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3771 if (error || req->newptr == NULL) 3772 return error; 3773 3774 ifnet_serialize_all(ifp); 3775 for (i = 0; i < sc->rx_ring_cnt; ++i) 3776 sc->rx_rings[i].wreg_nsegs = nsegs; 3777 ifnet_deserialize_all(ifp); 3778 3779 return 0; 3780 } 3781 3782 static int 3783 igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3784 { 3785 struct igb_softc *sc = (void *)arg1; 3786 struct ifnet *ifp = &sc->arpcom.ac_if; 3787 int error, nsegs, i; 3788 3789 nsegs = sc->tx_rings[0].wreg_nsegs; 3790 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3791 if (error || req->newptr == NULL) 3792 return error; 3793 3794 ifnet_serialize_all(ifp); 3795 for (i = 0; i < sc->tx_ring_cnt; ++i) 3796 sc->tx_rings[i].wreg_nsegs = nsegs; 3797 ifnet_deserialize_all(ifp); 3798 3799 return 0; 3800 } 3801 3802 static void 3803 igb_init_intr(struct igb_softc *sc) 3804 { 3805 int i; 3806 3807 igb_set_intr_mask(sc); 3808 3809 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) 3810 igb_init_unshared_intr(sc); 3811 3812 for (i = 0; i < sc->intr_cnt; ++i) 3813 igb_set_eitr(sc, i, sc->intr_data[i].intr_rate); 3814 } 3815 3816 static void 3817 igb_init_unshared_intr(struct igb_softc *sc) 3818 { 3819 struct e1000_hw *hw = &sc->hw; 3820 const struct igb_rx_ring *rxr; 3821 const struct igb_tx_ring *txr; 3822 uint32_t ivar, index; 3823 int i; 3824 3825 /* 3826 * Enable extended mode 3827 */ 3828 if (sc->hw.mac.type != e1000_82575) { 3829 uint32_t gpie; 3830 int ivar_max; 3831 3832 gpie = E1000_GPIE_NSICR; 3833 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3834 gpie |= E1000_GPIE_MSIX_MODE | 3835 E1000_GPIE_EIAME | 3836 E1000_GPIE_PBA; 3837 } 3838 E1000_WRITE_REG(hw, E1000_GPIE, gpie); 3839 3840 /* 3841 * Clear IVARs 3842 */ 3843 switch (sc->hw.mac.type) { 3844 case e1000_82576: 3845 ivar_max = IGB_MAX_IVAR_82576; 3846 break; 3847 3848 case e1000_82580: 3849 ivar_max = IGB_MAX_IVAR_82580; 3850 break; 3851 3852 case e1000_i350: 3853 ivar_max = IGB_MAX_IVAR_I350; 3854 break; 3855 3856 case e1000_i354: 3857 ivar_max = IGB_MAX_IVAR_I354; 3858 break; 3859 3860 case e1000_vfadapt: 3861 case e1000_vfadapt_i350: 3862 ivar_max = IGB_MAX_IVAR_VF; 3863 break; 3864 3865 case e1000_i210: 3866 ivar_max = IGB_MAX_IVAR_I210; 3867 break; 3868 3869 case e1000_i211: 3870 ivar_max = IGB_MAX_IVAR_I211; 3871 break; 3872 3873 default: 3874 panic("unknown mac type %d\n", sc->hw.mac.type); 3875 } 3876 for (i = 0; i < ivar_max; ++i) 3877 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 3878 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3879 } else { 3880 uint32_t tmp; 3881 3882 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 3883 ("82575 w/ MSI-X")); 3884 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 3885 tmp |= E1000_CTRL_EXT_IRCA; 3886 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 3887 } 3888 3889 /* 3890 * Map TX/RX interrupts to EICR 3891 */ 3892 switch (sc->hw.mac.type) { 3893 case e1000_82580: 3894 case e1000_i350: 3895 case e1000_i354: 3896 case e1000_vfadapt: 3897 case e1000_vfadapt_i350: 3898 case e1000_i210: 3899 case e1000_i211: 3900 /* RX entries */ 3901 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3902 rxr = &sc->rx_rings[i]; 3903 3904 index = i >> 1; 3905 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3906 3907 if (i & 1) 
{ 3908 ivar &= 0xff00ffff; 3909 ivar |= 3910 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 3911 } else { 3912 ivar &= 0xffffff00; 3913 ivar |= 3914 (rxr->rx_intr_vec | E1000_IVAR_VALID); 3915 } 3916 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3917 } 3918 /* TX entries */ 3919 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3920 txr = &sc->tx_rings[i]; 3921 3922 index = i >> 1; 3923 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3924 3925 if (i & 1) { 3926 ivar &= 0x00ffffff; 3927 ivar |= 3928 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 3929 } else { 3930 ivar &= 0xffff00ff; 3931 ivar |= 3932 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 3933 } 3934 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3935 } 3936 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3937 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 3938 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3939 } 3940 break; 3941 3942 case e1000_82576: 3943 /* RX entries */ 3944 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3945 rxr = &sc->rx_rings[i]; 3946 3947 index = i & 0x7; /* Each IVAR has two entries */ 3948 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3949 3950 if (i < 8) { 3951 ivar &= 0xffffff00; 3952 ivar |= 3953 (rxr->rx_intr_vec | E1000_IVAR_VALID); 3954 } else { 3955 ivar &= 0xff00ffff; 3956 ivar |= 3957 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 3958 } 3959 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3960 } 3961 /* TX entries */ 3962 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3963 txr = &sc->tx_rings[i]; 3964 3965 index = i & 0x7; /* Each IVAR has two entries */ 3966 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3967 3968 if (i < 8) { 3969 ivar &= 0xffff00ff; 3970 ivar |= 3971 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 3972 } else { 3973 ivar &= 0x00ffffff; 3974 ivar |= 3975 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 3976 } 3977 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3978 } 3979 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3980 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 3981 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3982 } 3983 break; 3984 3985 case e1000_82575: 3986 /* 3987 * Enable necessary interrupt bits. 3988 * 3989 * The name of the register is confusing; in addition to 3990 * configuring the first vector of MSI-X, it also configures 3991 * which bits of EICR could be set by the hardware even when 3992 * MSI or line interrupt is used; it thus controls interrupt 3993 * generation. It MUST be configured explicitly; the default 3994 * value mentioned in the datasheet is wrong: RX queue0 and 3995 * TX queue0 are NOT enabled by default. 
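 * The value written below is the intr_mask assembled by
 * igb_set_intr_mask(): the status interrupt bit plus one
 * E1000_EICR_{RX,TX}_QUEUEn bit for each ring in use.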
3996 */
3997 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask);
3998 break;
3999 
4000 default:
4001 panic("unknown mac type %d\n", sc->hw.mac.type);
4002 }
4003 }
4004 
4005 static int
4006 igb_setup_intr(struct igb_softc *sc)
4007 {
4008 int i;
4009 
4010 for (i = 0; i < sc->intr_cnt; ++i) {
4011 struct igb_intr_data *intr = &sc->intr_data[i];
4012 int error;
4013 
4014 error = bus_setup_intr_descr(sc->dev, intr->intr_res,
4015 INTR_MPSAFE, intr->intr_func, intr->intr_funcarg,
4016 &intr->intr_hand, intr->intr_serialize, intr->intr_desc);
4017 if (error) {
4018 device_printf(sc->dev, "can't setup intr %d\n", i);
4019 igb_teardown_intr(sc, i);
4020 return error;
4021 }
4022 }
4023 return 0;
4024 }
4025 
4026 static void
4027 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_vec0, int intr_vecmax)
4028 {
4029 if (txr->sc->hw.mac.type == e1000_82575) {
4030 txr->tx_intr_vec = 0; /* unused */
4031 switch (txr->me) {
4032 case 0:
4033 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
4034 break;
4035 case 1:
4036 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
4037 break;
4038 case 2:
4039 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
4040 break;
4041 case 3:
4042 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
4043 break;
4044 default:
4045 panic("unsupported TX ring %d\n", txr->me);
4046 }
4047 } else {
4048 int intr_vec = *intr_vec0;
4049 
4050 txr->tx_intr_vec = intr_vec % intr_vecmax;
4051 txr->tx_intr_mask = 1 << txr->tx_intr_vec;
4052 
4053 *intr_vec0 = intr_vec + 1;
4054 }
4055 }
4056 
4057 static void
4058 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_vec0, int intr_vecmax)
4059 {
4060 if (rxr->sc->hw.mac.type == e1000_82575) {
4061 rxr->rx_intr_vec = 0; /* unused */
4062 switch (rxr->me) {
4063 case 0:
4064 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
4065 break;
4066 case 1:
4067 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
4068 break;
4069 case 2:
4070 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
4071 break;
4072 case 3:
4073 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
4074 break;
4075 default:
4076 panic("unsupported RX ring %d\n", rxr->me);
4077 }
4078 } else {
4079 int intr_vec = *intr_vec0;
4080 
4081 rxr->rx_intr_vec = intr_vec % intr_vecmax;
4082 rxr->rx_intr_mask = 1 << rxr->rx_intr_vec;
4083 
4084 *intr_vec0 = intr_vec + 1;
4085 }
4086 }
4087 
4088 static void
4089 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
4090 {
4091 struct igb_softc *sc = ifp->if_softc;
4092 
4093 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
4094 }
4095 
4096 static void
4097 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
4098 {
4099 struct igb_softc *sc = ifp->if_softc;
4100 
4101 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
4102 }
4103 
4104 static int
4105 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
4106 {
4107 struct igb_softc *sc = ifp->if_softc;
4108 
4109 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
4110 slz);
4111 }
4112 
4113 #ifdef INVARIANTS
4114 
4115 static void
4116 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
4117 boolean_t serialized)
4118 {
4119 struct igb_softc *sc = ifp->if_softc;
4120 
4121 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
4122 slz, serialized);
4123 }
4124 
4125 #endif /* INVARIANTS */
4126 
4127 static void
4128 igb_set_intr_mask(struct igb_softc *sc)
4129 {
4130 int i;
4131 
4132 sc->intr_mask = sc->sts_intr_mask;
4133 for (i = 0; i < sc->rx_ring_inuse; ++i)
4134 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
4135 for (i = 0; i <
sc->tx_ring_inuse; ++i) 4136 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask; 4137 if (bootverbose) { 4138 if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n", 4139 sc->intr_mask); 4140 } 4141 } 4142 4143 static int 4144 igb_alloc_intr(struct igb_softc *sc) 4145 { 4146 struct igb_tx_ring *txr; 4147 struct igb_intr_data *intr; 4148 int i, intr_vec, intr_vecmax; 4149 u_int intr_flags; 4150 4151 igb_alloc_msix(sc); 4152 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 4153 igb_set_ring_inuse(sc, FALSE); 4154 goto done; 4155 } 4156 4157 /* 4158 * Reset some settings changed by igb_alloc_msix(). 4159 */ 4160 if (sc->rx_rmap_intr != NULL) { 4161 if_ringmap_free(sc->rx_rmap_intr); 4162 sc->rx_rmap_intr = NULL; 4163 } 4164 if (sc->tx_rmap_intr != NULL) { 4165 if_ringmap_free(sc->tx_rmap_intr); 4166 sc->tx_rmap_intr = NULL; 4167 } 4168 if (sc->intr_data != NULL) { 4169 kfree(sc->intr_data, M_DEVBUF); 4170 sc->intr_data = NULL; 4171 } 4172 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4173 txr = &sc->tx_rings[i]; 4174 txr->tx_intr_vec = 0; 4175 txr->tx_intr_mask = 0; 4176 txr->tx_intr_cpuid = -1; 4177 } 4178 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4179 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4180 4181 rxr->rx_intr_vec = 0; 4182 rxr->rx_intr_mask = 0; 4183 rxr->rx_txr = NULL; 4184 } 4185 4186 sc->intr_cnt = 1; 4187 sc->intr_data = kmalloc(sizeof(struct igb_intr_data), M_DEVBUF, 4188 M_WAITOK | M_ZERO); 4189 intr = &sc->intr_data[0]; 4190 4191 /* 4192 * Allocate MSI/legacy interrupt resource 4193 */ 4194 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable, 4195 &intr->intr_rid, &intr_flags); 4196 4197 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 4198 int unshared; 4199 4200 unshared = device_getenv_int(sc->dev, "irq.unshared", 0); 4201 if (!unshared) { 4202 sc->flags |= IGB_FLAG_SHARED_INTR; 4203 if (bootverbose) 4204 device_printf(sc->dev, "IRQ shared\n"); 4205 } else { 4206 intr_flags &= ~RF_SHAREABLE; 4207 if (bootverbose) 4208 device_printf(sc->dev, "IRQ unshared\n"); 4209 } 4210 } 4211 4212 intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4213 &intr->intr_rid, intr_flags); 4214 if (intr->intr_res == NULL) { 4215 device_printf(sc->dev, "Unable to allocate bus resource: " 4216 "interrupt\n"); 4217 return ENXIO; 4218 } 4219 4220 intr->intr_serialize = &sc->main_serialize; 4221 intr->intr_cpuid = rman_get_cpuid(intr->intr_res); 4222 intr->intr_func = (sc->flags & IGB_FLAG_SHARED_INTR) ? 
4223 igb_intr_shared : igb_intr; 4224 intr->intr_funcarg = sc; 4225 intr->intr_rate = IGB_INTR_RATE; 4226 intr->intr_use = IGB_INTR_USE_RXTX; 4227 4228 sc->tx_rings[0].tx_intr_cpuid = intr->intr_cpuid; 4229 4230 /* 4231 * Setup MSI/legacy interrupt mask 4232 */ 4233 switch (sc->hw.mac.type) { 4234 case e1000_82575: 4235 intr_vecmax = IGB_MAX_TXRXINT_82575; 4236 break; 4237 4238 case e1000_82576: 4239 intr_vecmax = IGB_MAX_TXRXINT_82576; 4240 break; 4241 4242 case e1000_82580: 4243 intr_vecmax = IGB_MAX_TXRXINT_82580; 4244 break; 4245 4246 case e1000_i350: 4247 intr_vecmax = IGB_MAX_TXRXINT_I350; 4248 break; 4249 4250 case e1000_i354: 4251 intr_vecmax = IGB_MAX_TXRXINT_I354; 4252 break; 4253 4254 case e1000_i210: 4255 intr_vecmax = IGB_MAX_TXRXINT_I210; 4256 break; 4257 4258 case e1000_i211: 4259 intr_vecmax = IGB_MAX_TXRXINT_I211; 4260 break; 4261 4262 default: 4263 intr_vecmax = IGB_MIN_TXRXINT; 4264 break; 4265 } 4266 intr_vec = 0; 4267 for (i = 0; i < sc->tx_ring_cnt; ++i) 4268 igb_set_txintr_mask(&sc->tx_rings[i], &intr_vec, intr_vecmax); 4269 for (i = 0; i < sc->rx_ring_cnt; ++i) 4270 igb_set_rxintr_mask(&sc->rx_rings[i], &intr_vec, intr_vecmax); 4271 sc->sts_intr_mask = E1000_EICR_OTHER; 4272 4273 igb_set_ring_inuse(sc, FALSE); 4274 KKASSERT(sc->rx_ring_inuse <= IGB_MIN_RING_RSS); 4275 if (sc->rx_ring_inuse == IGB_MIN_RING_RSS) { 4276 /* 4277 * Allocate RX ring map for RSS setup. 4278 */ 4279 sc->rx_rmap_intr = if_ringmap_alloc(sc->dev, 4280 IGB_MIN_RING_RSS, IGB_MIN_RING_RSS); 4281 KASSERT(if_ringmap_count(sc->rx_rmap_intr) == 4282 sc->rx_ring_inuse, ("RX ring inuse mismatch")); 4283 } 4284 done: 4285 igb_set_intr_mask(sc); 4286 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4287 txr = &sc->tx_rings[i]; 4288 if (txr->tx_intr_cpuid < 0) 4289 txr->tx_intr_cpuid = 0; 4290 } 4291 return 0; 4292 } 4293 4294 static void 4295 igb_free_intr(struct igb_softc *sc) 4296 { 4297 if (sc->intr_data == NULL) 4298 return; 4299 4300 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 4301 struct igb_intr_data *intr = &sc->intr_data[0]; 4302 4303 KKASSERT(sc->intr_cnt == 1); 4304 if (intr->intr_res != NULL) { 4305 bus_release_resource(sc->dev, SYS_RES_IRQ, 4306 intr->intr_rid, intr->intr_res); 4307 } 4308 if (sc->intr_type == PCI_INTR_TYPE_MSI) 4309 pci_release_msi(sc->dev); 4310 4311 kfree(sc->intr_data, M_DEVBUF); 4312 } else { 4313 igb_free_msix(sc, TRUE); 4314 } 4315 } 4316 4317 static void 4318 igb_teardown_intr(struct igb_softc *sc, int intr_cnt) 4319 { 4320 int i; 4321 4322 if (sc->intr_data == NULL) 4323 return; 4324 4325 for (i = 0; i < intr_cnt; ++i) { 4326 struct igb_intr_data *intr = &sc->intr_data[i]; 4327 4328 bus_teardown_intr(sc->dev, intr->intr_res, intr->intr_hand); 4329 } 4330 } 4331 4332 static void 4333 igb_alloc_msix(struct igb_softc *sc) 4334 { 4335 int msix_enable, msix_cnt, msix_ring, alloc_cnt; 4336 int i, x, error; 4337 int ring_cnt, ring_cntmax; 4338 struct igb_intr_data *intr; 4339 boolean_t setup = FALSE; 4340 4341 /* 4342 * Don't enable MSI-X on 82575, see: 4343 * 82575 specification update errata #25 4344 */ 4345 if (sc->hw.mac.type == e1000_82575) 4346 return; 4347 4348 /* Don't enable MSI-X on VF */ 4349 if (sc->vf_ifp) 4350 return; 4351 4352 msix_enable = device_getenv_int(sc->dev, "msix.enable", 4353 igb_msix_enable); 4354 if (!msix_enable) 4355 return; 4356 4357 msix_cnt = pci_msix_count(sc->dev); 4358 #ifdef IGB_MSIX_DEBUG 4359 msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt); 4360 #endif 4361 if (msix_cnt <= 1) { 4362 /* One MSI-X model does not make sense. 
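 * A single vector would gain nothing over plain MSI, so fall
 * back to the MSI/legacy path in igb_alloc_intr().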
*/ 4363 return; 4364 } 4365 if (bootverbose) 4366 device_printf(sc->dev, "MSI-X count %d\n", msix_cnt); 4367 msix_ring = msix_cnt - 1; /* -1 for status */ 4368 4369 /* 4370 * Configure # of RX/TX rings usable by MSI-X. 4371 */ 4372 igb_get_rxring_cnt(sc, &ring_cnt, &ring_cntmax); 4373 if (ring_cntmax > msix_ring) 4374 ring_cntmax = msix_ring; 4375 sc->rx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax); 4376 4377 igb_get_txring_cnt(sc, &ring_cnt, &ring_cntmax); 4378 if (ring_cntmax > msix_ring) 4379 ring_cntmax = msix_ring; 4380 sc->tx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax); 4381 4382 if_ringmap_match(sc->dev, sc->rx_rmap_intr, sc->tx_rmap_intr); 4383 sc->rx_ring_msix = if_ringmap_count(sc->rx_rmap_intr); 4384 KASSERT(sc->rx_ring_msix <= sc->rx_ring_cnt, 4385 ("total RX ring count %d, MSI-X RX ring count %d", 4386 sc->rx_ring_cnt, sc->rx_ring_msix)); 4387 sc->tx_ring_msix = if_ringmap_count(sc->tx_rmap_intr); 4388 KASSERT(sc->tx_ring_msix <= sc->tx_ring_cnt, 4389 ("total TX ring count %d, MSI-X TX ring count %d", 4390 sc->tx_ring_cnt, sc->tx_ring_msix)); 4391 4392 /* 4393 * Aggregate TX/RX MSI-X 4394 */ 4395 ring_cntmax = sc->rx_ring_msix; 4396 if (ring_cntmax < sc->tx_ring_msix) 4397 ring_cntmax = sc->tx_ring_msix; 4398 KASSERT(ring_cntmax <= msix_ring, 4399 ("invalid ring count max %d, MSI-X count for rings %d", 4400 ring_cntmax, msix_ring)); 4401 4402 alloc_cnt = ring_cntmax + 1; /* +1 for status */ 4403 if (bootverbose) { 4404 device_printf(sc->dev, "MSI-X alloc %d, " 4405 "RX ring %d, TX ring %d\n", alloc_cnt, 4406 sc->rx_ring_msix, sc->tx_ring_msix); 4407 } 4408 4409 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR); 4410 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 4411 &sc->msix_mem_rid, RF_ACTIVE); 4412 if (sc->msix_mem_res == NULL) { 4413 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR_ALT); 4414 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 4415 &sc->msix_mem_rid, RF_ACTIVE); 4416 if (sc->msix_mem_res == NULL) { 4417 device_printf(sc->dev, "Unable to map MSI-X table\n"); 4418 return; 4419 } 4420 } 4421 4422 sc->intr_cnt = alloc_cnt; 4423 sc->intr_data = kmalloc(sizeof(struct igb_intr_data) * sc->intr_cnt, 4424 M_DEVBUF, M_WAITOK | M_ZERO); 4425 for (x = 0; x < sc->intr_cnt; ++x) { 4426 intr = &sc->intr_data[x]; 4427 intr->intr_rid = -1; 4428 intr->intr_rate = IGB_INTR_RATE; 4429 } 4430 4431 x = 0; 4432 for (i = 0; i < sc->rx_ring_msix; ++i) { 4433 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4434 struct igb_tx_ring *txr = NULL; 4435 int cpuid, j; 4436 4437 KKASSERT(x < sc->intr_cnt); 4438 rxr->rx_intr_vec = x; 4439 rxr->rx_intr_mask = 1 << rxr->rx_intr_vec; 4440 4441 cpuid = if_ringmap_cpumap(sc->rx_rmap_intr, i); 4442 4443 /* 4444 * Try finding TX ring to piggyback. 
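 * A TX ring mapped to the same CPU as this RX ring shares the
 * RX ring's MSI-X vector and mask, so one interrupt serves both
 * directions on that CPU.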
4445 */ 4446 for (j = 0; j < sc->tx_ring_msix; ++j) { 4447 if (cpuid == 4448 if_ringmap_cpumap(sc->tx_rmap_intr, j)) { 4449 txr = &sc->tx_rings[j]; 4450 KKASSERT(txr->tx_intr_cpuid < 0); 4451 break; 4452 } 4453 } 4454 rxr->rx_txr = txr; 4455 4456 intr = &sc->intr_data[x++]; 4457 intr->intr_serialize = &rxr->rx_serialize; 4458 intr->intr_cpuid = cpuid; 4459 KKASSERT(intr->intr_cpuid < netisr_ncpus); 4460 intr->intr_funcarg = rxr; 4461 if (txr != NULL) { 4462 intr->intr_func = igb_msix_rxtx; 4463 intr->intr_use = IGB_INTR_USE_RXTX; 4464 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), 4465 "%s rx%dtx%d", device_get_nameunit(sc->dev), 4466 i, txr->me); 4467 4468 txr->tx_intr_vec = rxr->rx_intr_vec; 4469 txr->tx_intr_mask = rxr->rx_intr_mask; 4470 txr->tx_intr_cpuid = intr->intr_cpuid; 4471 } else { 4472 intr->intr_func = igb_msix_rx; 4473 intr->intr_rate = IGB_MSIX_RX_RATE; 4474 intr->intr_use = IGB_INTR_USE_RX; 4475 4476 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), 4477 "%s rx%d", device_get_nameunit(sc->dev), i); 4478 } 4479 intr->intr_desc = intr->intr_desc0; 4480 } 4481 4482 for (i = 0; i < sc->tx_ring_msix; ++i) { 4483 struct igb_tx_ring *txr = &sc->tx_rings[i]; 4484 4485 if (txr->tx_intr_cpuid >= 0) { 4486 /* Piggybacked by RX ring. */ 4487 continue; 4488 } 4489 4490 KKASSERT(x < sc->intr_cnt); 4491 txr->tx_intr_vec = x; 4492 txr->tx_intr_mask = 1 << txr->tx_intr_vec; 4493 4494 intr = &sc->intr_data[x++]; 4495 intr->intr_serialize = &txr->tx_serialize; 4496 intr->intr_func = igb_msix_tx; 4497 intr->intr_funcarg = txr; 4498 intr->intr_rate = IGB_MSIX_TX_RATE; 4499 intr->intr_use = IGB_INTR_USE_TX; 4500 4501 intr->intr_cpuid = if_ringmap_cpumap(sc->tx_rmap_intr, i); 4502 KKASSERT(intr->intr_cpuid < netisr_ncpus); 4503 txr->tx_intr_cpuid = intr->intr_cpuid; 4504 4505 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s tx%d", 4506 device_get_nameunit(sc->dev), i); 4507 intr->intr_desc = intr->intr_desc0; 4508 } 4509 4510 /* 4511 * Link status 4512 */ 4513 KKASSERT(x < sc->intr_cnt); 4514 sc->sts_msix_vec = x; 4515 sc->sts_intr_mask = 1 << sc->sts_msix_vec; 4516 4517 intr = &sc->intr_data[x++]; 4518 intr->intr_serialize = &sc->main_serialize; 4519 intr->intr_func = igb_msix_status; 4520 intr->intr_funcarg = sc; 4521 intr->intr_cpuid = 0; 4522 intr->intr_use = IGB_INTR_USE_STATUS; 4523 4524 ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s sts", 4525 device_get_nameunit(sc->dev)); 4526 intr->intr_desc = intr->intr_desc0; 4527 4528 KKASSERT(x == sc->intr_cnt); 4529 4530 error = pci_setup_msix(sc->dev); 4531 if (error) { 4532 device_printf(sc->dev, "Setup MSI-X failed\n"); 4533 goto back; 4534 } 4535 setup = TRUE; 4536 4537 for (i = 0; i < sc->intr_cnt; ++i) { 4538 intr = &sc->intr_data[i]; 4539 4540 error = pci_alloc_msix_vector(sc->dev, i, &intr->intr_rid, 4541 intr->intr_cpuid); 4542 if (error) { 4543 device_printf(sc->dev, 4544 "Unable to allocate MSI-X %d on cpu%d\n", i, 4545 intr->intr_cpuid); 4546 goto back; 4547 } 4548 4549 intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4550 &intr->intr_rid, RF_ACTIVE); 4551 if (intr->intr_res == NULL) { 4552 device_printf(sc->dev, 4553 "Unable to allocate MSI-X %d resource\n", i); 4554 error = ENOMEM; 4555 goto back; 4556 } 4557 } 4558 4559 pci_enable_msix(sc->dev); 4560 sc->intr_type = PCI_INTR_TYPE_MSIX; 4561 back: 4562 if (error) 4563 igb_free_msix(sc, setup); 4564 } 4565 4566 static void 4567 igb_free_msix(struct igb_softc *sc, boolean_t setup) 4568 { 4569 int i; 4570 4571 KKASSERT(sc->intr_cnt > 1); 4572 
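/*
 * Release each vector's IRQ resource and MSI-X rid; tear down the
 * MSI-X infrastructure only if pci_setup_msix() had succeeded.
 */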
4573 for (i = 0; i < sc->intr_cnt; ++i) { 4574 struct igb_intr_data *intr = &sc->intr_data[i]; 4575 4576 if (intr->intr_res != NULL) { 4577 bus_release_resource(sc->dev, SYS_RES_IRQ, 4578 intr->intr_rid, intr->intr_res); 4579 } 4580 if (intr->intr_rid >= 0) 4581 pci_release_msix_vector(sc->dev, intr->intr_rid); 4582 } 4583 if (setup) 4584 pci_teardown_msix(sc->dev); 4585 4586 sc->intr_cnt = 0; 4587 kfree(sc->intr_data, M_DEVBUF); 4588 sc->intr_data = NULL; 4589 } 4590 4591 static void 4592 igb_msix_rx(void *arg) 4593 { 4594 struct igb_rx_ring *rxr = arg; 4595 4596 ASSERT_SERIALIZED(&rxr->rx_serialize); 4597 igb_rxeof(rxr, -1); 4598 4599 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask); 4600 } 4601 4602 static void 4603 igb_msix_tx(void *arg) 4604 { 4605 struct igb_tx_ring *txr = arg; 4606 4607 ASSERT_SERIALIZED(&txr->tx_serialize); 4608 4609 igb_tx_intr(txr, *(txr->tx_hdr)); 4610 E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask); 4611 } 4612 4613 static void 4614 igb_msix_status(void *arg) 4615 { 4616 struct igb_softc *sc = arg; 4617 uint32_t icr; 4618 4619 ASSERT_SERIALIZED(&sc->main_serialize); 4620 4621 icr = E1000_READ_REG(&sc->hw, E1000_ICR); 4622 if (icr & E1000_ICR_LSC) { 4623 sc->hw.mac.get_link_status = 1; 4624 igb_update_link_status(sc); 4625 } 4626 4627 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask); 4628 } 4629 4630 static void 4631 igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling) 4632 { 4633 sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling); 4634 sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling); 4635 if (bootverbose) { 4636 if_printf(&sc->arpcom.ac_if, "RX rings %d/%d, TX rings %d/%d\n", 4637 sc->rx_ring_inuse, sc->rx_ring_cnt, 4638 sc->tx_ring_inuse, sc->tx_ring_cnt); 4639 } 4640 } 4641 4642 static int 4643 igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling) 4644 { 4645 if (!IGB_ENABLE_HWRSS(sc)) 4646 return 1; 4647 4648 if (polling) 4649 return sc->rx_ring_cnt; 4650 else if (sc->intr_type != PCI_INTR_TYPE_MSIX) 4651 return IGB_MIN_RING_RSS; 4652 else 4653 return sc->rx_ring_msix; 4654 } 4655 4656 static int 4657 igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling) 4658 { 4659 if (!IGB_ENABLE_HWTSS(sc)) 4660 return 1; 4661 4662 if (polling) 4663 return sc->tx_ring_cnt; 4664 else if (sc->intr_type != PCI_INTR_TYPE_MSIX) 4665 return IGB_MIN_RING; 4666 else 4667 return sc->tx_ring_msix; 4668 } 4669 4670 static int 4671 igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp) 4672 { 4673 int hoff, iphlen, thoff; 4674 struct mbuf *m; 4675 4676 m = *mp; 4677 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 4678 4679 iphlen = m->m_pkthdr.csum_iphlen; 4680 thoff = m->m_pkthdr.csum_thlen; 4681 hoff = m->m_pkthdr.csum_lhlen; 4682 4683 KASSERT(iphlen > 0, ("invalid ip hlen")); 4684 KASSERT(thoff > 0, ("invalid tcp hlen")); 4685 KASSERT(hoff > 0, ("invalid ether hlen")); 4686 4687 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 4688 m = m_pullup(m, hoff + iphlen + thoff); 4689 if (m == NULL) { 4690 *mp = NULL; 4691 return ENOBUFS; 4692 } 4693 *mp = m; 4694 } 4695 if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) { 4696 struct ip *ip; 4697 4698 ip = mtodoff(m, struct ip *, hoff); 4699 ip->ip_len = 0; 4700 } 4701 4702 return 0; 4703 } 4704 4705 static void 4706 igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen) 4707 { 4708 struct e1000_adv_tx_context_desc *TXD; 4709 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 4710 int hoff, ctxd, iphlen, thoff; 4711 4712 iphlen = 
m->m_pkthdr.csum_iphlen; 4713 thoff = m->m_pkthdr.csum_thlen; 4714 hoff = m->m_pkthdr.csum_lhlen; 4715 4716 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 4717 4718 ctxd = txr->next_avail_desc; 4719 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 4720 4721 if (m->m_flags & M_VLANTAG) { 4722 uint16_t vlantag; 4723 4724 vlantag = htole16(m->m_pkthdr.ether_vlantag); 4725 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 4726 } 4727 4728 vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT); 4729 vlan_macip_lens |= iphlen; 4730 4731 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 4732 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 4733 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 4734 4735 mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT); 4736 mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT); 4737 4738 /* 4739 * 82575 needs the TX context index added; the queue 4740 * index is used as TX context index here. 4741 */ 4742 if (txr->sc->hw.mac.type == e1000_82575) 4743 mss_l4len_idx |= txr->me << 4; 4744 4745 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 4746 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 4747 TXD->seqnum_seed = htole32(0); 4748 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 4749 4750 /* We've consumed the first desc, adjust counters */ 4751 if (++ctxd == txr->num_tx_desc) 4752 ctxd = 0; 4753 txr->next_avail_desc = ctxd; 4754 --txr->tx_avail; 4755 4756 *hlen = hoff + iphlen + thoff; 4757 } 4758 4759 static void 4760 igb_setup_serialize(struct igb_softc *sc) 4761 { 4762 int i = 0, j; 4763 4764 /* Main + RX + TX */ 4765 sc->serialize_cnt = 1 + sc->rx_ring_cnt + sc->tx_ring_cnt; 4766 sc->serializes = 4767 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *), 4768 M_DEVBUF, M_WAITOK | M_ZERO); 4769 4770 /* 4771 * Setup serializes 4772 * 4773 * NOTE: Order is critical 4774 */ 4775 4776 KKASSERT(i < sc->serialize_cnt); 4777 sc->serializes[i++] = &sc->main_serialize; 4778 4779 for (j = 0; j < sc->rx_ring_cnt; ++j) { 4780 KKASSERT(i < sc->serialize_cnt); 4781 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize; 4782 } 4783 4784 for (j = 0; j < sc->tx_ring_cnt; ++j) { 4785 KKASSERT(i < sc->serialize_cnt); 4786 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize; 4787 } 4788 4789 KKASSERT(i == sc->serialize_cnt); 4790 } 4791 4792 static void 4793 igb_msix_rxtx(void *arg) 4794 { 4795 struct igb_rx_ring *rxr = arg; 4796 struct igb_tx_ring *txr; 4797 int hdr; 4798 4799 ASSERT_SERIALIZED(&rxr->rx_serialize); 4800 4801 igb_rxeof(rxr, -1); 4802 4803 /* 4804 * NOTE: 4805 * Since next_to_clean is only changed by igb_txeof(), 4806 * which is called only in interrupt handler, the 4807 * check w/o holding tx serializer is MPSAFE. 
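 * Only when the header value differs from next_to_clean is
 * there TX work to reap, and only then is the TX serializer
 * entered.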
4808 */
4809 txr = rxr->rx_txr;
4810 hdr = *(txr->tx_hdr);
4811 if (hdr != txr->next_to_clean) {
4812 lwkt_serialize_enter(&txr->tx_serialize);
4813 igb_tx_intr(txr, hdr);
4814 lwkt_serialize_exit(&txr->tx_serialize);
4815 }
4816 
4817 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
4818 }
4819 
4820 static void
4821 igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling)
4822 {
4823 if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
4824 sc->timer_cpuid = 0; /* XXX fixed */
4825 else
4826 sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res);
4827 }
4828 
4829 static void
4830 igb_init_dmac(struct igb_softc *sc, uint32_t pba)
4831 {
4832 struct e1000_hw *hw = &sc->hw;
4833 uint32_t reg;
4834 
4835 if (hw->mac.type == e1000_i211)
4836 return;
4837 
4838 if (hw->mac.type > e1000_82580) {
4839 uint32_t dmac;
4840 uint16_t hwm;
4841 
4842 if (sc->dma_coalesce == 0) { /* Disabling it */
4843 reg = ~E1000_DMACR_DMAC_EN;
4844 E1000_WRITE_REG(hw, E1000_DMACR, reg);
4845 return;
4846 } else {
4847 if_printf(&sc->arpcom.ac_if,
4848 "DMA Coalescing enabled\n");
4849 }
4850 
4851 /* Set starting threshold */
4852 E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
4853 
4854 hwm = 64 * pba - sc->max_frame_size / 16;
4855 if (hwm < 64 * (pba - 6))
4856 hwm = 64 * (pba - 6);
4857 reg = E1000_READ_REG(hw, E1000_FCRTC);
4858 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
4859 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
4860 & E1000_FCRTC_RTH_COAL_MASK);
4861 E1000_WRITE_REG(hw, E1000_FCRTC, reg);
4862 
4863 dmac = pba - sc->max_frame_size / 512;
4864 if (dmac < pba - 10)
4865 dmac = pba - 10;
4866 reg = E1000_READ_REG(hw, E1000_DMACR);
4867 reg &= ~E1000_DMACR_DMACTHR_MASK;
4868 reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT)
4869 & E1000_DMACR_DMACTHR_MASK);
4870 
4871 /* Transition to L0s or L1 if available. */
4872 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
4873 
4874 /*
4875 * Check whether the link is a 2.5Gb backplane connection
4876 * before configuring the watchdog timer: the coalescing
4877 * delay (sc->dma_coalesce, in msec) is programmed in
4878 * 12.8 usec intervals on a 2.5Gb link and in 32 usec
4879 * intervals otherwise.
4880 */
4881 if (hw->mac.type == e1000_i354) {
4882 int status = E1000_READ_REG(hw, E1000_STATUS);
4883 
4884 if ((status & E1000_STATUS_2P5_SKU) &&
4885 !(status & E1000_STATUS_2P5_SKU_OVER))
4886 reg |= ((sc->dma_coalesce * 5) >> 6);
4887 else
4888 reg |= (sc->dma_coalesce >> 5);
4889 } else {
4890 reg |= (sc->dma_coalesce >> 5);
4891 }
4892 
4893 E1000_WRITE_REG(hw, E1000_DMACR, reg);
4894 
4895 E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
4896 
4897 /* Set the interval before transition */
4898 reg = E1000_READ_REG(hw, E1000_DMCTLX);
4899 if (hw->mac.type == e1000_i350)
4900 reg |= IGB_DMCTLX_DCFLUSH_DIS;
4901 /*
4902 * On a 2.5Gb connection the TTLX unit is 0.4 usec, so the
4903 * value becomes 0x4 * 2.5 = 0xA; the delay is still 4 usec.
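 * (Check: 0xA * 0.4 usec = 4 usec; the 0x4 used at other speeds
 * implies a TTLX unit of roughly 1 usec.)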
4904 */ 4905 if (hw->mac.type == e1000_i354) { 4906 int status = E1000_READ_REG(hw, E1000_STATUS); 4907 4908 if ((status & E1000_STATUS_2P5_SKU) && 4909 !(status & E1000_STATUS_2P5_SKU_OVER)) 4910 reg |= 0xA; 4911 else 4912 reg |= 0x4; 4913 } else { 4914 reg |= 0x4; 4915 } 4916 E1000_WRITE_REG(hw, E1000_DMCTLX, reg); 4917 4918 /* Free space in tx packet buffer to wake from DMA coal */ 4919 E1000_WRITE_REG(hw, E1000_DMCTXTH, 4920 (IGB_TXPBSIZE - (2 * sc->max_frame_size)) >> 6); 4921 4922 /* Make low power state decision controlled by DMA coal */ 4923 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 4924 reg &= ~E1000_PCIEMISC_LX_DECISION; 4925 E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); 4926 } else if (hw->mac.type == e1000_82580) { 4927 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 4928 E1000_WRITE_REG(hw, E1000_PCIEMISC, 4929 reg & ~E1000_PCIEMISC_LX_DECISION); 4930 E1000_WRITE_REG(hw, E1000_DMACR, 0); 4931 } 4932 } 4933
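/*
 * Illustrative sketch of the IVAR packing that
 * igb_init_unshared_intr() performs for the 82580 and later MACs;
 * it is kept under "#if 0" because it is an example, not driver
 * code.  Each 32-bit IVAR holds four 8-bit entries: RX queue i
 * lands in byte 0 (even i) or byte 2 (odd i) of IVAR i/2, and TX
 * queue i in byte 1 (even i) or byte 3 (odd i).  igb_ivar_entry()
 * is a hypothetical helper, not a HAL function.
 */
#if 0
static uint32_t
igb_ivar_entry(uint32_t ivar, int byte, uint8_t vec)
{
	/*
	 * Clear the old 8-bit entry, then merge the vector, tagged
	 * with E1000_IVAR_VALID, into the selected byte.
	 */
	ivar &= ~(0xffU << (byte * 8));
	ivar |= (uint32_t)(vec | E1000_IVAR_VALID) << (byte * 8);
	return ivar;
}

/*
 * For example, mapping RX queue 3 to vector 5:
 *
 *	ivar = igb_ivar_entry(ivar, (3 & 1) ? 2 : 0, 5);
 *
 * which corresponds to the "ivar &= 0xff00ffff; ivar |=
 * (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16" case in
 * igb_init_unshared_intr().
 */
#endif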