/*
 * Copyright (c) 2001-2013, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_igb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/igb/if_igb.h>
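
/*
 * IGB_RSS_DPRINTF below is debug-only instrumentation: it prints through
 * if_printf() when the per-device "rss_debug" sysctl level is high enough,
 * and compiles away to ((void)0) unless the kernel is built with
 * IGB_RSS_DEBUG.
 */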

#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */

#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id)	\
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }

static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),
	IGB_DEVICE(I350_VF),
	IGB_DEVICE(I210_COPPER),
	IGB_DEVICE(I210_COPPER_IT),
	IGB_DEVICE(I210_COPPER_OEM1),
	IGB_DEVICE(I210_COPPER_FLASHLESS),
	IGB_DEVICE(I210_SERDES_FLASHLESS),
	IGB_DEVICE(I210_FIBER),
	IGB_DEVICE(I210_SERDES),
	IGB_DEVICE(I210_SGMII),
	IGB_DEVICE(I211_COPPER),
	IGB_DEVICE(I354_BACKPLANE_1GBPS),
	IGB_DEVICE(I354_BACKPLANE_2_5GBPS),
	IGB_DEVICE(I354_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};
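
/*
 * The probe routine below walks this table and matches on PCI
 * vendor/device ID; the IGB_DEVICE_NULL sentinel terminates the walk,
 * so it must stay the last entry.
 */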

static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static void	igb_add_intr_rate_sysctl(struct igb_softc *, int,
		    const char *, const char *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
static int	igb_get_rxring_inuse(const struct igb_softc *, boolean_t);
static int	igb_get_txring_inuse(const struct igb_softc *, boolean_t);
static void	igb_set_timer_cpuid(struct igb_softc *, boolean_t);
#ifdef IFPOLL_ENABLE
static int	igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *, boolean_t);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *);
static void	igb_init_dmac(struct igb_softc *, uint32_t);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);
static void	igb_setup_serialize(struct igb_softc *);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifaltq_subque *);
static void	igb_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *, int);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *, int);
static void	igb_alloc_msix(struct igb_softc *);
static void	igb_free_msix(struct igb_softc *, boolean_t);
static void	igb_msix_rx_conf(struct igb_softc *, int, int *, int);
static void	igb_msix_tx_conf(struct igb_softc *, int, int *, int);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);
static void	igb_msix_rxtx(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);

static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	DEVMETHOD_END
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_txr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_msix_agg_rxtx = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */

static char	igb_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_RXPAUSE;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.txr", &igb_txr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_INT("hw.igb.msix.agg_rxtx", &igb_msix_agg_rxtx);
TUNABLE_STR("hw.igb.flow_ctrl", igb_flowctrl, sizeof(igb_flowctrl));

/* i350 specific */
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);

static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Skip checksum offload results if the Ignore-Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}
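
/*
 * Device attach: map the BAR(0) register space, initialize the shared
 * e1000 HAL, size and allocate the RX/TX rings, set up interrupts
 * (MSI-X/MSI/line), read and validate the EEPROM/NVM and the permanent
 * MAC address, then attach the ifnet and sysctl nodes.  Any failure
 * unwinds through igb_detach().
 */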
static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, ring_max;
	char flowctrl[IFM_ETH_FC_STRLEN];
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

#ifdef notyet
	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
#endif

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    igb_media_change, igb_media_status);
	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		ring_max = IGB_MAX_RING_82575;
		break;

	case e1000_82576:
		ring_max = IGB_MAX_RING_82576;
		break;

	case e1000_82580:
		ring_max = IGB_MAX_RING_82580;
		break;

	case e1000_i350:
		ring_max = IGB_MAX_RING_I350;
		break;

	case e1000_i354:
		ring_max = IGB_MAX_RING_I354;
		break;

	case e1000_i210:
		ring_max = IGB_MAX_RING_I210;
		break;

	case e1000_i211:
		ring_max = IGB_MAX_RING_I211;
		break;

	default:
		ring_max = IGB_MIN_RING;
		break;
	}

	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max);
#ifdef IGB_RSS_DEBUG
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr_debug", sc->rx_ring_cnt);
#endif
	sc->rx_ring_inuse = sc->rx_ring_cnt;

	sc->tx_ring_cnt = device_getenv_int(dev, "txr", igb_txr);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
#ifdef IGB_TSS_DEBUG
	sc->tx_ring_cnt = device_getenv_int(dev, "txr_debug", sc->tx_ring_cnt);
#endif
	sc->tx_ring_inuse = sc->tx_ring_cnt;
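
	/*
	 * Note: the ring counts above come from the hw.igb.{rxr,txr}
	 * tunables (0 selects a machine-dependent default) and are
	 * clamped against the MAC-specific maximum via if_ring_count2();
	 * the "inuse" counts may be reduced further later, e.g. when
	 * polling is enabled.
	 */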

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    igb_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	if (sc->tx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->tx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->tx_npoll_off = offset;
#endif

	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/* Set up serializers */
	igb_setup_serialize(sc);

	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		if (sc->hw.phy.media_type == e1000_media_type_copper) {
			if (sc->hw.mac.type == e1000_i354)
				e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
			else
				e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
		}
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address afterwards.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (sc->hw.mac.type != e1000_i210 && sc->hw.mac.type != e1000_i211 &&
	    e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree; must be after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc, FALSE);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;
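
	/*
	 * The APME EEPROM bit is consulted above, but WOL is then
	 * explicitly forced off; sc->wol therefore stays 0 until the
	 * XXX above is resolved.
	 */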

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif

	/*
	 * Disable interrupts to prevent spurious interrupts (line based,
	 * MSI or even MSI-X), which have been observed on several types
	 * of LOMs, from being handled.
	 */
	igb_disable_intr(sc);

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		igb_teardown_intr(sc, sc->intr_cnt);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);
	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}
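
/*
 * ioctl entry point.  Runs with all of the interface's serializers held
 * (see ASSERT_IFNET_SERIALIZED_ALL below), so it may safely reprogram
 * the hardware, e.g. reinitializing on MTU or capability changes.
 */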
static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
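
/*
 * if_init entry point.  Stops the adapter, reloads the (possibly
 * locally-administered) MAC address, resets the hardware and brings the
 * configured RX/TX rings, interrupts and offloads back up.  Also called
 * with all serializers held.
 */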
static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest mac address; user can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc, FALSE);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configure the RX/TX rings to be used */
	igb_set_ring_inuse(sc, polling);
	ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
	}

	igb_set_timer_cpuid(sc, polling);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling, and make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		if (sc->hw.mac.type == e1000_i354)
			e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
		else
			e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
	}
}

static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;

	case 100:
		/*
		 * Support for 100Mb SFP - these are Fiber,
		 * but the media type appears as serdes
		 */
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_100_FX;
		else
			ifmr->ifm_active |= IFM_100_TX;
		break;

	case 1000:
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_1000_SX;
		else
			ifmr->ifm_active |= IFM_1000_T;
		break;

	case 2500:
		ifmr->ifm_active |= IFM_2500_SX;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}
" 1134 "allowed for half-duplex\n"); 1135 } 1136 return EINVAL; 1137 } 1138 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1139 } 1140 sc->hw.mac.autoneg = FALSE; 1141 sc->hw.phy.autoneg_advertised = 0; 1142 break; 1143 1144 case IFM_10_T: 1145 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1146 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1147 } else { 1148 if (IFM_OPTIONS(ifm->ifm_media) & 1149 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1150 if (bootverbose) { 1151 if_printf(ifp, "Flow control is not " 1152 "allowed for half-duplex\n"); 1153 } 1154 return EINVAL; 1155 } 1156 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1157 } 1158 sc->hw.mac.autoneg = FALSE; 1159 sc->hw.phy.autoneg_advertised = 0; 1160 break; 1161 1162 default: 1163 if (bootverbose) { 1164 if_printf(ifp, "Unsupported media type %d\n", 1165 IFM_SUBTYPE(ifm->ifm_media)); 1166 } 1167 return EINVAL; 1168 } 1169 sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK; 1170 1171 if (ifp->if_flags & IFF_RUNNING) 1172 igb_init(sc); 1173 1174 return 0; 1175 } 1176 1177 static void 1178 igb_set_promisc(struct igb_softc *sc) 1179 { 1180 struct ifnet *ifp = &sc->arpcom.ac_if; 1181 struct e1000_hw *hw = &sc->hw; 1182 uint32_t reg; 1183 1184 if (sc->vf_ifp) { 1185 e1000_promisc_set_vf(hw, e1000_promisc_enabled); 1186 return; 1187 } 1188 1189 reg = E1000_READ_REG(hw, E1000_RCTL); 1190 if (ifp->if_flags & IFF_PROMISC) { 1191 reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1192 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1193 } else if (ifp->if_flags & IFF_ALLMULTI) { 1194 reg |= E1000_RCTL_MPE; 1195 reg &= ~E1000_RCTL_UPE; 1196 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1197 } 1198 } 1199 1200 static void 1201 igb_disable_promisc(struct igb_softc *sc) 1202 { 1203 struct e1000_hw *hw = &sc->hw; 1204 struct ifnet *ifp = &sc->arpcom.ac_if; 1205 uint32_t reg; 1206 int mcnt = 0; 1207 1208 if (sc->vf_ifp) { 1209 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 1210 return; 1211 } 1212 reg = E1000_READ_REG(hw, E1000_RCTL); 1213 reg &= ~E1000_RCTL_UPE; 1214 if (ifp->if_flags & IFF_ALLMULTI) { 1215 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 1216 } else { 1217 struct ifmultiaddr *ifma; 1218 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1219 if (ifma->ifma_addr->sa_family != AF_LINK) 1220 continue; 1221 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1222 break; 1223 mcnt++; 1224 } 1225 } 1226 /* Don't disable if in MAX groups */ 1227 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 1228 reg &= ~E1000_RCTL_MPE; 1229 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1230 } 1231 1232 static void 1233 igb_set_multi(struct igb_softc *sc) 1234 { 1235 struct ifnet *ifp = &sc->arpcom.ac_if; 1236 struct ifmultiaddr *ifma; 1237 uint32_t reg_rctl = 0; 1238 uint8_t *mta; 1239 int mcnt = 0; 1240 1241 mta = sc->mta; 1242 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 1243 1244 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1245 if (ifma->ifma_addr->sa_family != AF_LINK) 1246 continue; 1247 1248 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1249 break; 1250 1251 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1252 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); 1253 mcnt++; 1254 } 1255 1256 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 1257 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1258 reg_rctl |= E1000_RCTL_MPE; 1259 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1260 } else { 1261 e1000_update_mc_addr_list(&sc->hw, mta, mcnt); 1262 } 1263 } 1264 1265 static void 1266 igb_timer(void *xsc) 1267 { 1268 struct igb_softc *sc = xsc; 1269 1270 
static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALL THROUGH */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			/* Get the flow control for display */
			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));

			if_printf(ifp, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex",
			    flowctrl);
		}
		if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
			e1000_force_flowctrl(hw, sc->ifm_flowctrl);
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* Delay Link Up for Phy update */
		if ((hw->mac.type == e1000_i210 ||
		     hw->mac.type == e1000_i211) &&
		    hw->phy.id == I210_I_PHY_ID)
			msec_delay(IGB_I210_LINK_DELAY);
		/*
		 * Reset if the media type changed.
		 * Supports AutoMediaDetect for the Marvell M88 PHY in i354.
		 */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = FALSE;
			igb_reset(sc, TRUE);
		}
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}
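
/*
 * Bring the adapter to a fully stopped state: interrupts off, callout
 * stopped, watchdogs disarmed, hardware reset, and all RX/TX ring
 * buffers freed.  Must be called with all serializers held.
 */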
static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
		sc->tx_rings[i].tx_flags &= ~IGB_TXFLAG_ENABLED;
	}

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}

static void
igb_reset(struct igb_softc *sc, boolean_t media_reset)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA).
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;

	case e1000_i210:
	case e1000_i211:
		pba = E1000_PBA_34K;
		break;

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * If short on RX space, RX wins
			 * and must trump TX adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}
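
	/*
	 * Worked example for the watermark math below (hypothetical
	 * values, assuming E1000_PBA_34K == 34 and a standard 1518-byte
	 * max frame on an i210): pba << 10 = 34816 bytes, so
	 * min(34816 * 9 / 10, 34816 - 2 * 1518) = min(31334, 31780) =
	 * 31334.  With 16-byte granularity this yields high_water =
	 * 31334 & 0xFFF0 = 31328 and low_water = 31312.
	 */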

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high
	 *   water mark.  This allows the receiver to restart by sending
	 *   XON when it has drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
	fc->requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Reset for AutoMediaDetect */
	if (media_reset) {
		e1000_setup_init_funcs(hw, TRUE);
		e1000_get_bus_info(hw);
	}

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	igb_init_dmac(sc, pba);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif

	ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_rings[0].num_rx_desc;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_mask;
	ifq_set_subq_mask(&ifp->if_snd, 0);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog);
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
}

static void
igb_add_sysctl(struct igb_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
#if defined(IGB_RSS_DEBUG) || defined(IGB_TSS_DEBUG)
	char node[32];
	int i;
#endif

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

#define IGB_ADD_INTR_RATE_SYSCTL(sc, use, name) \
do { \
	igb_add_intr_rate_sysctl(sc, IGB_INTR_USE_##use, #name "_intr_rate", \
	    #use " interrupt rate"); \
} while (0)

	IGB_ADD_INTR_RATE_SYSCTL(sc, RXTX, rxtx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, RX, rx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, TX, tx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, STATUS, sts);

#undef IGB_ADD_INTR_RATE_SYSCTL

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_wreg_nsegs, "I",
	    "# of segments sent before write to hardware register");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_rx_wreg_nsegs, "I",
	    "# of segments received before write to hardware register");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif

#ifdef IGB_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
	    "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx,
		    SYSCTL_CHILDREN(tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets");
	}
#endif
#ifdef IGB_TSS_DEBUG
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "tx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx,
		    SYSCTL_CHILDREN(tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->tx_rings[i].tx_packets, "TXed packets");
	}
#endif
}
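
/*
 * Ring allocation: create one top-level busdma tag for the device, then
 * allocate the TX and RX ring arrays (cache-line aligned) and build each
 * descriptor ring via igb_create_tx_ring()/igb_create_rx_ring().
 */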
static int
igb_alloc_rings(struct igb_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &sc->parent_tag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate TX descriptor rings and buffers
	 */
	sc->tx_rings = kmalloc_cachealign(
	    sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		txr->sc = sc;
		txr->me = i;
		lwkt_serialize_init(&txr->tx_serialize);

		error = igb_create_tx_ring(txr);
		if (error)
			return error;
	}

	/*
	 * Allocate RX descriptor rings and buffers
	 */
	sc->rx_rings = kmalloc_cachealign(
	    sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		rxr->sc = sc;
		rxr->me = i;
		lwkt_serialize_init(&rxr->rx_serialize);

		error = igb_create_rx_ring(rxr);
		if (error)
			return error;
	}

	return 0;
}

static void
igb_free_rings(struct igb_softc *sc)
{
	int i;

	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			igb_destroy_tx_ring(txr, txr->num_tx_desc);
		}
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
		}
		kfree(sc->rx_rings, M_DEVBUF);
	}
}
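
/*
 * On the descriptor-count check below: a legacy TX descriptor is 16
 * bytes, so with IGB_DBA_ALIGN of 128 (assumption; see if_igb.h) the
 * ring byte size is a multiple of IGB_DBA_ALIGN exactly when the
 * requested count is a multiple of 8, besides staying within
 * [IGB_MIN_TXD, IGB_MAX_TXD].
 */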
static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i, ntxd;

	/*
	 * Validate number of transmit descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 ||
	    ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, ntxd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = ntxd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	if (txr->sc->hw.mac.type == e1000_82575)
		txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0;

	/*
	 * Initialize various watermarks
	 */
	txr->intr_nsegs = txr->num_tx_desc / 16;
	txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS;

	return 0;
}

static void
igb_free_tx_ring(struct igb_tx_ring *txr)
{
	int i;

	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
	}
}
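
/*
 * igb_free_tx_ring() above only drops queued mbufs (used by igb_stop());
 * igb_destroy_tx_ring() below additionally unwinds the descriptor ring,
 * the head write-back buffer and the DMA maps/tags (used on detach and
 * on attach-time failure).
 */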
static void
igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
{
	int i;

	if (txr->txdma.dma_vaddr != NULL) {
		bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
		bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
		    txr->txdma.dma_map);
		bus_dma_tag_destroy(txr->txdma.dma_tag);
		txr->txdma.dma_vaddr = NULL;
	}

	if (txr->tx_hdr != NULL) {
		bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
		bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
		    txr->tx_hdr_dmap);
		bus_dma_tag_destroy(txr->tx_hdr_dtag);
		txr->tx_hdr = NULL;
	}

	if (txr->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		KKASSERT(txbuf->m_head == NULL);
		bus_dmamap_destroy(txr->tx_tag, txbuf->map);
	}
	bus_dma_tag_destroy(txr->tx_tag);

	kfree(txr->tx_buf, M_DEVBUF);
	txr->tx_buf = NULL;
}

static void
igb_init_tx_ring(struct igb_tx_ring *txr)
{
	/* Clear the old descriptor contents */
	bzero(txr->tx_base,
	    sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);

	/* Clear TX head write-back buffer */
	*(txr->tx_hdr) = 0;

	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;
	txr->tx_nsegs = 0;

	/* Set number of descriptors available */
	txr->tx_avail = txr->num_tx_desc;

	/* Enable this TX ring */
	txr->tx_flags |= IGB_TXFLAG_ENABLED;
}

static void
igb_init_tx_unit(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		uint64_t bus_addr = txr->txdma.dma_paddr;
		uint64_t hdr_paddr = txr->tx_hdr_paddr;
		uint32_t txdctl = 0;
		uint32_t dca_txctrl;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    txr->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
		dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);

		/*
		 * Don't set WB_on_EITR:
		 * - 82575 does not have it
		 * - It almost has no effect on 82576, see:
		 *   82576 specification update errata #26
		 * - It causes unnecessary bus traffic
		 */
		E1000_WRITE_REG(hw, E1000_TDWBAH(i),
		    (uint32_t)(hdr_paddr >> 32));
		E1000_WRITE_REG(hw, E1000_TDWBAL(i),
		    ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);

		/*
		 * WTHRESH is ignored by the hardware, since header
		 * write back mode is used.
		 */
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}

	if (sc->vf_ifp)
		return;

	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
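
/*
 * Checksum-offload context setup.  Advanced TX requires a separate
 * context descriptor carrying the VLAN tag and header lengths; note that
 * it consumes one slot in the ring (tx_avail is decremented below).
 */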
*/ 2020 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 2021 } 2022 2023 static boolean_t 2024 igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp) 2025 { 2026 struct e1000_adv_tx_context_desc *TXD; 2027 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 2028 int ehdrlen, ctxd, ip_hlen = 0; 2029 boolean_t offload = TRUE; 2030 2031 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0) 2032 offload = FALSE; 2033 2034 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 2035 2036 ctxd = txr->next_avail_desc; 2037 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 2038 2039 /* 2040 * In advanced descriptors the VLAN tag must 2041 * be placed into the context descriptor, so 2042 * we need a context descriptor just for that setup. 2043 */ 2044 if (mp->m_flags & M_VLANTAG) { 2045 uint16_t vlantag; 2046 2047 vlantag = htole16(mp->m_pkthdr.ether_vlantag); 2048 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 2049 } else if (!offload) { 2050 return FALSE; 2051 } 2052 2053 ehdrlen = mp->m_pkthdr.csum_lhlen; 2054 KASSERT(ehdrlen > 0, ("invalid ether hlen")); 2055 2056 /* Set the ether header length */ 2057 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; 2058 if (mp->m_pkthdr.csum_flags & CSUM_IP) { 2059 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 2060 ip_hlen = mp->m_pkthdr.csum_iphlen; 2061 KASSERT(ip_hlen > 0, ("invalid ip hlen")); 2062 } 2063 vlan_macip_lens |= ip_hlen; 2064 2065 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 2066 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 2067 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 2068 else if (mp->m_pkthdr.csum_flags & CSUM_UDP) 2069 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; 2070 2071 /* 2072 * 82575 needs the TX context index added; the queue 2073 * index is used as TX context index here. 2074 */ 2075 if (txr->sc->hw.mac.type == e1000_82575) 2076 mss_l4len_idx = txr->me << 4; 2077 2078 /* Now copy bits into descriptor */ 2079 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 2080 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 2081 TXD->seqnum_seed = htole32(0); 2082 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 2083 2084 /* We've consumed the first desc, adjust counters */ 2085 if (++ctxd == txr->num_tx_desc) 2086 ctxd = 0; 2087 txr->next_avail_desc = ctxd; 2088 --txr->tx_avail; 2089 2090 return offload; 2091 } 2092 2093 static void 2094 igb_txeof(struct igb_tx_ring *txr, int hdr) 2095 { 2096 int first, avail; 2097 2098 if (txr->tx_avail == txr->num_tx_desc) 2099 return; 2100 2101 first = txr->next_to_clean; 2102 if (first == hdr) 2103 return; 2104 2105 avail = txr->tx_avail; 2106 while (first != hdr) { 2107 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; 2108 2109 ++avail; 2110 if (txbuf->m_head) { 2111 bus_dmamap_unload(txr->tx_tag, txbuf->map); 2112 m_freem(txbuf->m_head); 2113 txbuf->m_head = NULL; 2114 } 2115 if (++first == txr->num_tx_desc) 2116 first = 0; 2117 } 2118 txr->next_to_clean = first; 2119 txr->tx_avail = avail; 2120 2121 /* 2122 * If we have a minimum of free descriptors, clear OACTIVE 2123 * to tell the stack that it is OK to send packets. 2124 */ 2125 if (txr->tx_avail > IGB_MAX_SCATTER + IGB_TX_RESERVED) { 2126 ifsq_clr_oactive(txr->ifsq); 2127 2128 /* 2129 * We have enough TX descriptors, so turn off 2130 * the watchdog. We allow a small number of 2131 * packets (roughly intr_nsegs) to remain pending 2132 * on the transmit ring.
2133 */ 2134 txr->tx_watchdog.wd_timer = 0; 2135 } 2136 } 2137 2138 static int 2139 igb_create_rx_ring(struct igb_rx_ring *rxr) 2140 { 2141 int rsize, i, error, nrxd; 2142 2143 /* 2144 * Validate number of receive descriptors. It must not exceed 2145 * hardware maximum, and must be a multiple of IGB_DBA_ALIGN. 2146 */ 2147 nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd); 2148 if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 || 2149 nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) { 2150 device_printf(rxr->sc->dev, 2151 "Using %d RX descriptors instead of %d!\n", 2152 IGB_DEFAULT_RXD, nrxd); 2153 rxr->num_rx_desc = IGB_DEFAULT_RXD; 2154 } else { 2155 rxr->num_rx_desc = nrxd; 2156 } 2157 2158 /* 2159 * Allocate RX descriptor ring 2160 */ 2161 rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc), 2162 IGB_DBA_ALIGN); 2163 rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag, 2164 IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2165 &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map, 2166 &rxr->rxdma.dma_paddr); 2167 if (rxr->rxdma.dma_vaddr == NULL) { 2168 device_printf(rxr->sc->dev, 2169 "Unable to allocate RX Descriptor memory\n"); 2170 return ENOMEM; 2171 } 2172 rxr->rx_base = rxr->rxdma.dma_vaddr; 2173 bzero(rxr->rx_base, rsize); 2174 2175 rsize = __VM_CACHELINE_ALIGN( 2176 sizeof(struct igb_rx_buf) * rxr->num_rx_desc); 2177 rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2178 2179 /* 2180 * Create DMA tag for RX buffers 2181 */ 2182 error = bus_dma_tag_create(rxr->sc->parent_tag, 2183 1, 0, /* alignment, bounds */ 2184 BUS_SPACE_MAXADDR, /* lowaddr */ 2185 BUS_SPACE_MAXADDR, /* highaddr */ 2186 NULL, NULL, /* filter, filterarg */ 2187 MCLBYTES, /* maxsize */ 2188 1, /* nsegments */ 2189 MCLBYTES, /* maxsegsize */ 2190 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2191 &rxr->rx_tag); 2192 if (error) { 2193 device_printf(rxr->sc->dev, 2194 "Unable to create RX payload DMA tag\n"); 2195 kfree(rxr->rx_buf, M_DEVBUF); 2196 rxr->rx_buf = NULL; 2197 return error; 2198 } 2199 2200 /* 2201 * Create spare DMA map for RX buffers 2202 */ 2203 error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK, 2204 &rxr->rx_sparemap); 2205 if (error) { 2206 device_printf(rxr->sc->dev, 2207 "Unable to create spare RX DMA map\n"); 2208 bus_dma_tag_destroy(rxr->rx_tag); 2209 kfree(rxr->rx_buf, M_DEVBUF); 2210 rxr->rx_buf = NULL; 2211 return error; 2212 } 2213 2214 /* 2215 * Create DMA maps for RX buffers 2216 */ 2217 for (i = 0; i < rxr->num_rx_desc; i++) { 2218 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2219 2220 error = bus_dmamap_create(rxr->rx_tag, 2221 BUS_DMA_WAITOK, &rxbuf->map); 2222 if (error) { 2223 device_printf(rxr->sc->dev, 2224 "Unable to create RX DMA map\n"); 2225 igb_destroy_rx_ring(rxr, i); 2226 return error; 2227 } 2228 } 2229 2230 /* 2231 * Initialize various watermarks 2232 */ 2233 rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS; 2234 2235 return 0; 2236 } 2237 2238 static void 2239 igb_free_rx_ring(struct igb_rx_ring *rxr) 2240 { 2241 int i; 2242 2243 for (i = 0; i < rxr->num_rx_desc; ++i) { 2244 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2245 2246 if (rxbuf->m_head != NULL) { 2247 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2248 m_freem(rxbuf->m_head); 2249 rxbuf->m_head = NULL; 2250 } 2251 } 2252 2253 if (rxr->fmp != NULL) 2254 m_freem(rxr->fmp); 2255 rxr->fmp = NULL; 2256 rxr->lmp = NULL; 2257 } 2258 2259 static void 2260 igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc) 2261 { 2262 int i; 2263 2264 if (rxr->rxdma.dma_vaddr != NULL) { 2265
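/* Teardown below mirrors setup in reverse order: unload the DMA map, free the coherent descriptor memory, then destroy the tag that backs both. */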
bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map); 2266 bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr, 2267 rxr->rxdma.dma_map); 2268 bus_dma_tag_destroy(rxr->rxdma.dma_tag); 2269 rxr->rxdma.dma_vaddr = NULL; 2270 } 2271 2272 if (rxr->rx_buf == NULL) 2273 return; 2274 2275 for (i = 0; i < ndesc; ++i) { 2276 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2277 2278 KKASSERT(rxbuf->m_head == NULL); 2279 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map); 2280 } 2281 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap); 2282 bus_dma_tag_destroy(rxr->rx_tag); 2283 2284 kfree(rxr->rx_buf, M_DEVBUF); 2285 rxr->rx_buf = NULL; 2286 } 2287 2288 static void 2289 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf) 2290 { 2291 rxd->read.pkt_addr = htole64(rxbuf->paddr); 2292 rxd->wb.upper.status_error = 0; 2293 } 2294 2295 static int 2296 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait) 2297 { 2298 struct mbuf *m; 2299 bus_dma_segment_t seg; 2300 bus_dmamap_t map; 2301 struct igb_rx_buf *rxbuf; 2302 int error, nseg; 2303 2304 m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2305 if (m == NULL) { 2306 if (wait) { 2307 if_printf(&rxr->sc->arpcom.ac_if, 2308 "Unable to allocate RX mbuf\n"); 2309 } 2310 return ENOBUFS; 2311 } 2312 m->m_len = m->m_pkthdr.len = MCLBYTES; 2313 2314 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2315 m_adj(m, ETHER_ALIGN); 2316 2317 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2318 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2319 if (error) { 2320 m_freem(m); 2321 if (wait) { 2322 if_printf(&rxr->sc->arpcom.ac_if, 2323 "Unable to load RX mbuf\n"); 2324 } 2325 return error; 2326 } 2327 2328 rxbuf = &rxr->rx_buf[i]; 2329 if (rxbuf->m_head != NULL) 2330 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2331 2332 map = rxbuf->map; 2333 rxbuf->map = rxr->rx_sparemap; 2334 rxr->rx_sparemap = map; 2335 2336 rxbuf->m_head = m; 2337 rxbuf->paddr = seg.ds_addr; 2338 2339 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2340 return 0; 2341 } 2342 2343 static int 2344 igb_init_rx_ring(struct igb_rx_ring *rxr) 2345 { 2346 int i; 2347 2348 /* Clear the ring contents */ 2349 bzero(rxr->rx_base, 2350 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2351 2352 /* Now replenish the ring mbufs */ 2353 for (i = 0; i < rxr->num_rx_desc; ++i) { 2354 int error; 2355 2356 error = igb_newbuf(rxr, i, TRUE); 2357 if (error) 2358 return error; 2359 } 2360 2361 /* Setup our descriptor indices */ 2362 rxr->next_to_check = 0; 2363 2364 rxr->fmp = NULL; 2365 rxr->lmp = NULL; 2366 rxr->discard = FALSE; 2367 2368 return 0; 2369 } 2370 2371 static void 2372 igb_init_rx_unit(struct igb_softc *sc) 2373 { 2374 struct ifnet *ifp = &sc->arpcom.ac_if; 2375 struct e1000_hw *hw = &sc->hw; 2376 uint32_t rctl, rxcsum, srrctl = 0; 2377 int i; 2378 2379 /* 2380 * Make sure receives are disabled while setting 2381 * up the descriptor ring 2382 */ 2383 rctl = E1000_READ_REG(hw, E1000_RCTL); 2384 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2385 2386 #if 0 2387 /* 2388 ** Set up for header split 2389 */ 2390 if (igb_header_split) { 2391 /* Use a standard mbuf for the header */ 2392 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2393 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2394 } else 2395 #endif 2396 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2397 2398 /* 2399 ** Set up for jumbo frames 2400 */ 2401 if (ifp->if_mtu > ETHERMTU) { 2402 rctl |= E1000_RCTL_LPE; 2403 #if 0 2404 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { 2405 srrctl 
|= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2406 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; 2407 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { 2408 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2409 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; 2410 } 2411 /* Set maximum packet len */ 2412 psize = adapter->max_frame_size; 2413 /* are we on a vlan? */ 2414 if (adapter->ifp->if_vlantrunk != NULL) 2415 psize += VLAN_TAG_SIZE; 2416 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); 2417 #else 2418 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2419 rctl |= E1000_RCTL_SZ_2048; 2420 #endif 2421 } else { 2422 rctl &= ~E1000_RCTL_LPE; 2423 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2424 rctl |= E1000_RCTL_SZ_2048; 2425 } 2426 2427 /* Setup the Base and Length of the Rx Descriptor Rings */ 2428 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2429 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2430 uint64_t bus_addr = rxr->rxdma.dma_paddr; 2431 uint32_t rxdctl; 2432 2433 E1000_WRITE_REG(hw, E1000_RDLEN(i), 2434 rxr->num_rx_desc * sizeof(struct e1000_rx_desc)); 2435 E1000_WRITE_REG(hw, E1000_RDBAH(i), 2436 (uint32_t)(bus_addr >> 32)); 2437 E1000_WRITE_REG(hw, E1000_RDBAL(i), 2438 (uint32_t)bus_addr); 2439 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); 2440 /* Enable this Queue */ 2441 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); 2442 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 2443 rxdctl &= 0xFFF00000; 2444 rxdctl |= IGB_RX_PTHRESH; 2445 rxdctl |= IGB_RX_HTHRESH << 8; 2446 /* 2447 * Don't set WTHRESH to a value above 1 on 82576, see: 2448 * 82576 specification update errata #26 2449 */ 2450 rxdctl |= IGB_RX_WTHRESH << 16; 2451 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); 2452 } 2453 2454 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2455 rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE); 2456 2457 /* 2458 * Receive Checksum Offload for TCP and UDP 2459 * 2460 * Checksum offloading is also enabled if multiple receive 2461 * queues are to be supported, since we need it to figure out 2462 * fragments. 2463 */ 2464 if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) { 2465 /* 2466 * NOTE: 2467 * PCSD must be enabled to enable multiple 2468 * receive queues. 2469 */ 2470 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2471 E1000_RXCSUM_PCSD; 2472 } else { 2473 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2474 E1000_RXCSUM_PCSD); 2475 } 2476 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2477 2478 if (IGB_ENABLE_HWRSS(sc)) { 2479 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE]; 2480 uint32_t reta_shift; 2481 int j, r; 2482 2483 /* 2484 * NOTE: 2485 * When we reach here, RSS has already been disabled 2486 * in igb_stop(), so we can safely configure the RSS key 2487 * and redirect table.
2488 */ 2489 2490 /* 2491 * Configure RSS key 2492 */ 2493 toeplitz_get_key(key, sizeof(key)); 2494 for (i = 0; i < IGB_NRSSRK; ++i) { 2495 uint32_t rssrk; 2496 2497 rssrk = IGB_RSSRK_VAL(key, i); 2498 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2499 2500 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk); 2501 } 2502 2503 /* 2504 * Configure RSS redirect table in following fashion: 2505 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 2506 */ 2507 reta_shift = IGB_RETA_SHIFT; 2508 if (hw->mac.type == e1000_82575) 2509 reta_shift = IGB_RETA_SHIFT_82575; 2510 2511 r = 0; 2512 for (j = 0; j < IGB_NRETA; ++j) { 2513 uint32_t reta = 0; 2514 2515 for (i = 0; i < IGB_RETA_SIZE; ++i) { 2516 uint32_t q; 2517 2518 q = (r % sc->rx_ring_inuse) << reta_shift; 2519 reta |= q << (8 * i); 2520 ++r; 2521 } 2522 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2523 E1000_WRITE_REG(hw, E1000_RETA(j), reta); 2524 } 2525 2526 /* 2527 * Enable multiple receive queues. 2528 * Enable IPv4 RSS standard hash functions. 2529 * Disable RSS interrupt on 82575 2530 */ 2531 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2532 E1000_MRQC_ENABLE_RSS_4Q | 2533 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2534 E1000_MRQC_RSS_FIELD_IPV4); 2535 } 2536 2537 /* Setup the Receive Control Register */ 2538 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2539 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2540 E1000_RCTL_RDMTS_HALF | 2541 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2542 /* Strip CRC bytes. */ 2543 rctl |= E1000_RCTL_SECRC; 2544 /* Make sure VLAN Filters are off */ 2545 rctl &= ~E1000_RCTL_VFE; 2546 /* Don't store bad packets */ 2547 rctl &= ~E1000_RCTL_SBP; 2548 2549 /* Enable Receives */ 2550 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2551 2552 /* 2553 * Setup the HW Rx Head and Tail Descriptor Pointers 2554 * - needs to be after enable 2555 */ 2556 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2557 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2558 2559 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2560 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1); 2561 } 2562 } 2563 2564 static void 2565 igb_rx_refresh(struct igb_rx_ring *rxr, int i) 2566 { 2567 if (--i < 0) 2568 i = rxr->num_rx_desc - 1; 2569 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2570 } 2571 2572 static void 2573 igb_rxeof(struct igb_rx_ring *rxr, int count) 2574 { 2575 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2576 union e1000_adv_rx_desc *cur; 2577 uint32_t staterr; 2578 int i, ncoll = 0, cpuid = mycpuid; 2579 2580 i = rxr->next_to_check; 2581 cur = &rxr->rx_base[i]; 2582 staterr = le32toh(cur->wb.upper.status_error); 2583 2584 if ((staterr & E1000_RXD_STAT_DD) == 0) 2585 return; 2586 2587 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2588 struct pktinfo *pi = NULL, pi0; 2589 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2590 struct mbuf *m = NULL; 2591 boolean_t eop; 2592 2593 eop = (staterr & E1000_RXD_STAT_EOP) ? 
TRUE : FALSE; 2594 if (eop) 2595 --count; 2596 2597 ++ncoll; 2598 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2599 !rxr->discard) { 2600 struct mbuf *mp = rxbuf->m_head; 2601 uint32_t hash, hashtype; 2602 uint16_t vlan; 2603 int len; 2604 2605 len = le16toh(cur->wb.upper.length); 2606 if ((rxr->sc->hw.mac.type == e1000_i350 || 2607 rxr->sc->hw.mac.type == e1000_i354) && 2608 (staterr & E1000_RXDEXT_STATERR_LB)) 2609 vlan = be16toh(cur->wb.upper.vlan); 2610 else 2611 vlan = le16toh(cur->wb.upper.vlan); 2612 2613 hash = le32toh(cur->wb.lower.hi_dword.rss); 2614 hashtype = le32toh(cur->wb.lower.lo_dword.data) & 2615 E1000_RXDADV_RSSTYPE_MASK; 2616 2617 IGB_RSS_DPRINTF(rxr->sc, 10, 2618 "ring%d, hash 0x%08x, hashtype %u\n", 2619 rxr->me, hash, hashtype); 2620 2621 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2622 BUS_DMASYNC_POSTREAD); 2623 2624 if (igb_newbuf(rxr, i, FALSE) != 0) { 2625 IFNET_STAT_INC(ifp, iqdrops, 1); 2626 goto discard; 2627 } 2628 2629 mp->m_len = len; 2630 if (rxr->fmp == NULL) { 2631 mp->m_pkthdr.len = len; 2632 rxr->fmp = mp; 2633 rxr->lmp = mp; 2634 } else { 2635 rxr->lmp->m_next = mp; 2636 rxr->lmp = rxr->lmp->m_next; 2637 rxr->fmp->m_pkthdr.len += len; 2638 } 2639 2640 if (eop) { 2641 m = rxr->fmp; 2642 rxr->fmp = NULL; 2643 rxr->lmp = NULL; 2644 2645 m->m_pkthdr.rcvif = ifp; 2646 IFNET_STAT_INC(ifp, ipackets, 1); 2647 2648 if (ifp->if_capenable & IFCAP_RXCSUM) 2649 igb_rxcsum(staterr, m); 2650 2651 if (staterr & E1000_RXD_STAT_VP) { 2652 m->m_pkthdr.ether_vlantag = vlan; 2653 m->m_flags |= M_VLANTAG; 2654 } 2655 2656 if (ifp->if_capenable & IFCAP_RSS) { 2657 pi = igb_rssinfo(m, &pi0, 2658 hash, hashtype, staterr); 2659 } 2660 #ifdef IGB_RSS_DEBUG 2661 rxr->rx_packets++; 2662 #endif 2663 } 2664 } else { 2665 IFNET_STAT_INC(ifp, ierrors, 1); 2666 discard: 2667 igb_setup_rxdesc(cur, rxbuf); 2668 if (!eop) 2669 rxr->discard = TRUE; 2670 else 2671 rxr->discard = FALSE; 2672 if (rxr->fmp != NULL) { 2673 m_freem(rxr->fmp); 2674 rxr->fmp = NULL; 2675 rxr->lmp = NULL; 2676 } 2677 m = NULL; 2678 } 2679 2680 if (m != NULL) 2681 ifp->if_input(ifp, m, pi, cpuid); 2682 2683 /* Advance our pointers to the next descriptor. 
*/ 2684 if (++i == rxr->num_rx_desc) 2685 i = 0; 2686 2687 if (ncoll >= rxr->wreg_nsegs) { 2688 igb_rx_refresh(rxr, i); 2689 ncoll = 0; 2690 } 2691 2692 cur = &rxr->rx_base[i]; 2693 staterr = le32toh(cur->wb.upper.status_error); 2694 } 2695 rxr->next_to_check = i; 2696 2697 if (ncoll > 0) 2698 igb_rx_refresh(rxr, i); 2699 } 2700 2701 2702 static void 2703 igb_set_vlan(struct igb_softc *sc) 2704 { 2705 struct e1000_hw *hw = &sc->hw; 2706 uint32_t reg; 2707 #if 0 2708 struct ifnet *ifp = sc->arpcom.ac_if; 2709 #endif 2710 2711 if (sc->vf_ifp) { 2712 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2713 return; 2714 } 2715 2716 reg = E1000_READ_REG(hw, E1000_CTRL); 2717 reg |= E1000_CTRL_VME; 2718 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2719 2720 #if 0 2721 /* Enable the Filter Table */ 2722 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2723 reg = E1000_READ_REG(hw, E1000_RCTL); 2724 reg &= ~E1000_RCTL_CFIEN; 2725 reg |= E1000_RCTL_VFE; 2726 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2727 } 2728 #endif 2729 2730 /* Update the frame size */ 2731 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2732 sc->max_frame_size + VLAN_TAG_SIZE); 2733 2734 #if 0 2735 /* Don't bother with table if no vlans */ 2736 if ((adapter->num_vlans == 0) || 2737 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2738 return; 2739 /* 2740 ** A soft reset zero's out the VFTA, so 2741 ** we need to repopulate it now. 2742 */ 2743 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2744 if (adapter->shadow_vfta[i] != 0) { 2745 if (adapter->vf_ifp) 2746 e1000_vfta_set_vf(hw, 2747 adapter->shadow_vfta[i], TRUE); 2748 else 2749 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2750 i, adapter->shadow_vfta[i]); 2751 } 2752 #endif 2753 } 2754 2755 static void 2756 igb_enable_intr(struct igb_softc *sc) 2757 { 2758 int i; 2759 2760 for (i = 0; i < sc->intr_cnt; ++i) 2761 lwkt_serialize_handler_enable(sc->intr_data[i].intr_serialize); 2762 2763 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2764 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 2765 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask); 2766 else 2767 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2768 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2769 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2770 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2771 } else { 2772 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2773 } 2774 E1000_WRITE_FLUSH(&sc->hw); 2775 } 2776 2777 static void 2778 igb_disable_intr(struct igb_softc *sc) 2779 { 2780 int i; 2781 2782 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2783 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2784 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2785 } 2786 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2787 E1000_WRITE_FLUSH(&sc->hw); 2788 2789 for (i = 0; i < sc->intr_cnt; ++i) 2790 lwkt_serialize_handler_disable(sc->intr_data[i].intr_serialize); 2791 } 2792 2793 /* 2794 * Bit of a misnomer, what this really means is 2795 * to enable OS management of the system... 
aka 2796 * to disable special hardware management features 2797 */ 2798 static void 2799 igb_get_mgmt(struct igb_softc *sc) 2800 { 2801 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2802 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 2803 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2804 2805 /* disable hardware interception of ARP */ 2806 manc &= ~E1000_MANC_ARP_EN; 2807 2808 /* enable receiving management packets to the host */ 2809 manc |= E1000_MANC_EN_MNG2HOST; 2810 manc2h |= 1 << 5; /* Mng Port 623 */ 2811 manc2h |= 1 << 6; /* Mng Port 664 */ 2812 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 2813 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2814 } 2815 } 2816 2817 /* 2818 * Give control back to hardware management controller 2819 * if there is one. 2820 */ 2821 static void 2822 igb_rel_mgmt(struct igb_softc *sc) 2823 { 2824 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2825 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2826 2827 /* Re-enable hardware interception of ARP */ 2828 manc |= E1000_MANC_ARP_EN; 2829 manc &= ~E1000_MANC_EN_MNG2HOST; 2830 2831 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2832 } 2833 } 2834 2835 /* 2836 * Sets CTRL_EXT:DRV_LOAD bit. 2837 * 2838 * For ASF and Pass Through versions of f/w this means that 2839 * the driver is loaded. 2840 */ 2841 static void 2842 igb_get_hw_control(struct igb_softc *sc) 2843 { 2844 uint32_t ctrl_ext; 2845 2846 if (sc->vf_ifp) 2847 return; 2848 2849 /* Let firmware know the driver has taken over */ 2850 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2851 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2852 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2853 } 2854 2855 /* 2856 * Resets CTRL_EXT:DRV_LOAD bit. 2857 * 2858 * For ASF and Pass Through versions of f/w this means that the 2859 * driver is no longer loaded. 2860 */ 2861 static void 2862 igb_rel_hw_control(struct igb_softc *sc) 2863 { 2864 uint32_t ctrl_ext; 2865 2866 if (sc->vf_ifp) 2867 return; 2868 2869 /* Let firmware take over control of h/w */ 2870 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2871 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2872 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2873 } 2874 2875 static boolean_t 2876 igb_is_valid_ether_addr(const uint8_t *addr) 2877 { 2878 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 2879 2880 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 2881 return FALSE; 2882 return TRUE; 2883 } 2884 2885 /* 2886 * Enable PCI Wake On LAN capability 2887 */ 2888 static void 2889 igb_enable_wol(device_t dev) 2890 { 2891 uint16_t cap, status; 2892 uint8_t id; 2893 2894 /* First find the capabilities pointer */ 2895 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 2896 2897 /* Read the PM Capabilities */ 2898 id = pci_read_config(dev, cap, 1); 2899 if (id != PCIY_PMG) /* Something wrong */ 2900 return; 2901 2902 /* 2903 * OK, we have the power capabilities, 2904 * so now get the status register 2905 */ 2906 cap += PCIR_POWER_STATUS; 2907 status = pci_read_config(dev, cap, 2); 2908 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2909 pci_write_config(dev, cap, status, 2); 2910 } 2911 2912 static void 2913 igb_update_stats_counters(struct igb_softc *sc) 2914 { 2915 struct e1000_hw *hw = &sc->hw; 2916 struct e1000_hw_stats *stats; 2917 struct ifnet *ifp = &sc->arpcom.ac_if; 2918 2919 /* 2920 * The virtual function adapter has only a 2921 * small controlled set of stats; do only 2922 * those and return.
2923 */ 2924 if (sc->vf_ifp) { 2925 igb_update_vf_stats_counters(sc); 2926 return; 2927 } 2928 stats = sc->stats; 2929 2930 if (sc->hw.phy.media_type == e1000_media_type_copper || 2931 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 2932 stats->symerrs += 2933 E1000_READ_REG(hw,E1000_SYMERRS); 2934 stats->sec += E1000_READ_REG(hw, E1000_SEC); 2935 } 2936 2937 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 2938 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 2939 stats->scc += E1000_READ_REG(hw, E1000_SCC); 2940 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 2941 2942 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 2943 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 2944 stats->colc += E1000_READ_REG(hw, E1000_COLC); 2945 stats->dc += E1000_READ_REG(hw, E1000_DC); 2946 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 2947 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 2948 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 2949 2950 /* 2951 * For watchdog management we need to know if we have been 2952 * paused during the last interval, so capture that here. 2953 */ 2954 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 2955 stats->xoffrxc += sc->pause_frames; 2956 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 2957 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 2958 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 2959 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 2960 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 2961 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 2962 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 2963 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 2964 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 2965 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 2966 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 2967 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 2968 2969 /* For the 64-bit byte counters the low dword must be read first. 
*/ 2970 /* Both registers clear on the read of the high dword */ 2971 2972 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 2973 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 2974 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 2975 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 2976 2977 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 2978 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 2979 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 2980 stats->roc += E1000_READ_REG(hw, E1000_ROC); 2981 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 2982 2983 stats->tor += E1000_READ_REG(hw, E1000_TORH); 2984 stats->tot += E1000_READ_REG(hw, E1000_TOTH); 2985 2986 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 2987 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 2988 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 2989 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 2990 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 2991 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 2992 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 2993 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 2994 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 2995 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 2996 2997 /* Interrupt Counts */ 2998 2999 stats->iac += E1000_READ_REG(hw, E1000_IAC); 3000 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 3001 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 3002 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 3003 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 3004 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 3005 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 3006 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 3007 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 3008 3009 /* Host to Card Statistics */ 3010 3011 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 3012 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 3013 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 3014 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 3015 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 3016 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 3017 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 3018 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 3019 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 3020 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 3021 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 3022 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 3023 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 3024 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 3025 3026 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 3027 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 3028 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 3029 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 3030 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 3031 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 3032 3033 IFNET_STAT_SET(ifp, collisions, stats->colc); 3034 3035 /* Rx Errors */ 3036 IFNET_STAT_SET(ifp, ierrors, 3037 stats->rxerrc + stats->crcerrs + stats->algnerrc + 3038 stats->ruc + stats->roc + stats->mpc + stats->cexterr); 3039 3040 /* Tx Errors */ 3041 IFNET_STAT_SET(ifp, oerrors, 3042 stats->ecol + stats->latecol + sc->watchdog_events); 3043 3044 /* Driver specific counters */ 3045 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 3046 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 3047 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 3048 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 3049 
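/* The PBA register is split between the TX and RX allocations of the on-chip packet buffer: the TX share sits in the upper 16 bits and the RX share in the lower 16 bits, which is what the two extractions below record. */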
sc->packet_buf_alloc_tx = 3050 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); 3051 sc->packet_buf_alloc_rx = 3052 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 3053 } 3054 3055 static void 3056 igb_vf_init_stats(struct igb_softc *sc) 3057 { 3058 struct e1000_hw *hw = &sc->hw; 3059 struct e1000_vf_stats *stats; 3060 3061 stats = sc->stats; 3062 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 3063 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 3064 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 3065 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 3066 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 3067 } 3068 3069 static void 3070 igb_update_vf_stats_counters(struct igb_softc *sc) 3071 { 3072 struct e1000_hw *hw = &sc->hw; 3073 struct e1000_vf_stats *stats; 3074 3075 if (sc->link_speed == 0) 3076 return; 3077 3078 stats = sc->stats; 3079 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 3080 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 3081 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 3082 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 3083 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 3084 } 3085 3086 #ifdef IFPOLL_ENABLE 3087 3088 static void 3089 igb_npoll_status(struct ifnet *ifp) 3090 { 3091 struct igb_softc *sc = ifp->if_softc; 3092 uint32_t reg_icr; 3093 3094 ASSERT_SERIALIZED(&sc->main_serialize); 3095 3096 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3097 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3098 sc->hw.mac.get_link_status = 1; 3099 igb_update_link_status(sc); 3100 } 3101 } 3102 3103 static void 3104 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 3105 { 3106 struct igb_tx_ring *txr = arg; 3107 3108 ASSERT_SERIALIZED(&txr->tx_serialize); 3109 3110 igb_txeof(txr, *(txr->tx_hdr)); 3111 if (!ifsq_is_empty(txr->ifsq)) 3112 ifsq_devstart(txr->ifsq); 3113 } 3114 3115 static void 3116 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3117 { 3118 struct igb_rx_ring *rxr = arg; 3119 3120 ASSERT_SERIALIZED(&rxr->rx_serialize); 3121 3122 igb_rxeof(rxr, cycle); 3123 } 3124 3125 static void 3126 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3127 { 3128 struct igb_softc *sc = ifp->if_softc; 3129 int i, txr_cnt, rxr_cnt; 3130 3131 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3132 3133 if (info) { 3134 int off; 3135 3136 info->ifpi_status.status_func = igb_npoll_status; 3137 info->ifpi_status.serializer = &sc->main_serialize; 3138 3139 txr_cnt = igb_get_txring_inuse(sc, TRUE); 3140 off = sc->tx_npoll_off; 3141 for (i = 0; i < txr_cnt; ++i) { 3142 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3143 int idx = i + off; 3144 3145 KKASSERT(idx < ncpus2); 3146 info->ifpi_tx[idx].poll_func = igb_npoll_tx; 3147 info->ifpi_tx[idx].arg = txr; 3148 info->ifpi_tx[idx].serializer = &txr->tx_serialize; 3149 ifsq_set_cpuid(txr->ifsq, idx); 3150 } 3151 3152 rxr_cnt = igb_get_rxring_inuse(sc, TRUE); 3153 off = sc->rx_npoll_off; 3154 for (i = 0; i < rxr_cnt; ++i) { 3155 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3156 int idx = i + off; 3157 3158 KKASSERT(idx < ncpus2); 3159 info->ifpi_rx[idx].poll_func = igb_npoll_rx; 3160 info->ifpi_rx[idx].arg = rxr; 3161 info->ifpi_rx[idx].serializer = &rxr->rx_serialize; 3162 } 3163 3164 if (ifp->if_flags & IFF_RUNNING) { 3165 if (rxr_cnt == sc->rx_ring_inuse && 3166 txr_cnt == sc->tx_ring_inuse) { 3167 igb_set_timer_cpuid(sc, TRUE); 3168 igb_disable_intr(sc); 3169 } else { 3170 igb_init(sc); 3171 } 3172 } 3173 } else { 3174 for (i = 0; i < 
sc->tx_ring_cnt; ++i) { 3175 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3176 3177 ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid); 3178 } 3179 3180 if (ifp->if_flags & IFF_RUNNING) { 3181 txr_cnt = igb_get_txring_inuse(sc, FALSE); 3182 rxr_cnt = igb_get_rxring_inuse(sc, FALSE); 3183 3184 if (rxr_cnt == sc->rx_ring_inuse && 3185 txr_cnt == sc->tx_ring_inuse) { 3186 igb_set_timer_cpuid(sc, FALSE); 3187 igb_enable_intr(sc); 3188 } else { 3189 igb_init(sc); 3190 } 3191 } 3192 } 3193 } 3194 3195 #endif /* IFPOLL_ENABLE */ 3196 3197 static void 3198 igb_intr(void *xsc) 3199 { 3200 struct igb_softc *sc = xsc; 3201 struct ifnet *ifp = &sc->arpcom.ac_if; 3202 uint32_t eicr; 3203 3204 ASSERT_SERIALIZED(&sc->main_serialize); 3205 3206 eicr = E1000_READ_REG(&sc->hw, E1000_EICR); 3207 3208 if (eicr == 0) 3209 return; 3210 3211 if (ifp->if_flags & IFF_RUNNING) { 3212 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3213 int i; 3214 3215 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3216 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3217 3218 if (eicr & rxr->rx_intr_mask) { 3219 lwkt_serialize_enter(&rxr->rx_serialize); 3220 igb_rxeof(rxr, -1); 3221 lwkt_serialize_exit(&rxr->rx_serialize); 3222 } 3223 } 3224 3225 if (eicr & txr->tx_intr_mask) { 3226 lwkt_serialize_enter(&txr->tx_serialize); 3227 igb_txeof(txr, *(txr->tx_hdr)); 3228 if (!ifsq_is_empty(txr->ifsq)) 3229 ifsq_devstart(txr->ifsq); 3230 lwkt_serialize_exit(&txr->tx_serialize); 3231 } 3232 } 3233 3234 if (eicr & E1000_EICR_OTHER) { 3235 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3236 3237 /* Link status change */ 3238 if (icr & E1000_ICR_LSC) { 3239 sc->hw.mac.get_link_status = 1; 3240 igb_update_link_status(sc); 3241 } 3242 } 3243 3244 /* 3245 * Reading EICR has the side effect to clear interrupt mask, 3246 * so all interrupts need to be enabled here. 3247 */ 3248 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 3249 } 3250 3251 static void 3252 igb_intr_shared(void *xsc) 3253 { 3254 struct igb_softc *sc = xsc; 3255 struct ifnet *ifp = &sc->arpcom.ac_if; 3256 uint32_t reg_icr; 3257 3258 ASSERT_SERIALIZED(&sc->main_serialize); 3259 3260 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3261 3262 /* Hot eject? */ 3263 if (reg_icr == 0xffffffff) 3264 return; 3265 3266 /* Definitely not our interrupt. 
*/ 3267 if (reg_icr == 0x0) 3268 return; 3269 3270 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) 3271 return; 3272 3273 if (ifp->if_flags & IFF_RUNNING) { 3274 if (reg_icr & 3275 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 3276 int i; 3277 3278 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3279 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3280 3281 lwkt_serialize_enter(&rxr->rx_serialize); 3282 igb_rxeof(rxr, -1); 3283 lwkt_serialize_exit(&rxr->rx_serialize); 3284 } 3285 } 3286 3287 if (reg_icr & E1000_ICR_TXDW) { 3288 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3289 3290 lwkt_serialize_enter(&txr->tx_serialize); 3291 igb_txeof(txr, *(txr->tx_hdr)); 3292 if (!ifsq_is_empty(txr->ifsq)) 3293 ifsq_devstart(txr->ifsq); 3294 lwkt_serialize_exit(&txr->tx_serialize); 3295 } 3296 } 3297 3298 /* Link status change */ 3299 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3300 sc->hw.mac.get_link_status = 1; 3301 igb_update_link_status(sc); 3302 } 3303 3304 if (reg_icr & E1000_ICR_RXO) 3305 sc->rx_overruns++; 3306 } 3307 3308 static int 3309 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp, 3310 int *segs_used, int *idx) 3311 { 3312 bus_dma_segment_t segs[IGB_MAX_SCATTER]; 3313 bus_dmamap_t map; 3314 struct igb_tx_buf *tx_buf, *tx_buf_mapped; 3315 union e1000_adv_tx_desc *txd = NULL; 3316 struct mbuf *m_head = *m_headp; 3317 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0; 3318 int maxsegs, nsegs, i, j, error; 3319 uint32_t hdrlen = 0; 3320 3321 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3322 error = igb_tso_pullup(txr, m_headp); 3323 if (error) 3324 return error; 3325 m_head = *m_headp; 3326 } 3327 3328 /* Set basic descriptor constants */ 3329 cmd_type_len |= E1000_ADVTXD_DTYP_DATA; 3330 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; 3331 if (m_head->m_flags & M_VLANTAG) 3332 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 3333 3334 /* 3335 * Map the packet for DMA. 3336 */ 3337 tx_buf = &txr->tx_buf[txr->next_avail_desc]; 3338 tx_buf_mapped = tx_buf; 3339 map = tx_buf->map; 3340 3341 maxsegs = txr->tx_avail - IGB_TX_RESERVED; 3342 if (maxsegs > IGB_MAX_SCATTER) 3343 maxsegs = IGB_MAX_SCATTER; 3344 3345 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp, 3346 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3347 if (error) { 3348 if (error == ENOBUFS) 3349 txr->sc->mbuf_defrag_failed++; 3350 else 3351 txr->sc->no_tx_dma_setup++; 3352 3353 m_freem(*m_headp); 3354 *m_headp = NULL; 3355 return error; 3356 } 3357 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE); 3358 3359 m_head = *m_headp; 3360 3361 /* 3362 * Set up the TX context descriptor, if any hardware offloading is 3363 * needed. This includes CSUM, VLAN, and TSO. It will consume one 3364 * TX descriptor. 3365 * 3366 * Unlike these chips' predecessors (em/emx), a TX context descriptor 3367 * will _not_ interfere with TX data fetch pipelining.
3368 */ 3369 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3370 igb_tso_ctx(txr, m_head, &hdrlen); 3371 cmd_type_len |= E1000_ADVTXD_DCMD_TSE; 3372 olinfo_status |= E1000_TXD_POPTS_IXSM << 8; 3373 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3374 txr->tx_nsegs++; 3375 (*segs_used)++; 3376 } else if (igb_txcsum_ctx(txr, m_head)) { 3377 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3378 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8); 3379 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) 3380 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8); 3381 txr->tx_nsegs++; 3382 (*segs_used)++; 3383 } 3384 3385 *segs_used += nsegs; 3386 txr->tx_nsegs += nsegs; 3387 if (txr->tx_nsegs >= txr->intr_nsegs) { 3388 /* 3389 * Report Status (RS) is turned on every intr_nsegs 3390 * descriptors (roughly). 3391 */ 3392 txr->tx_nsegs = 0; 3393 cmd_rs = E1000_ADVTXD_DCMD_RS; 3394 } 3395 3396 /* Calculate payload length */ 3397 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) 3398 << E1000_ADVTXD_PAYLEN_SHIFT); 3399 3400 /* 3401 * 82575 needs the TX context index added; the queue 3402 * index is used as TX context index here. 3403 */ 3404 if (txr->sc->hw.mac.type == e1000_82575) 3405 olinfo_status |= txr->me << 4; 3406 3407 /* Set up our transmit descriptors */ 3408 i = txr->next_avail_desc; 3409 for (j = 0; j < nsegs; j++) { 3410 bus_size_t seg_len; 3411 bus_addr_t seg_addr; 3412 3413 tx_buf = &txr->tx_buf[i]; 3414 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; 3415 seg_addr = segs[j].ds_addr; 3416 seg_len = segs[j].ds_len; 3417 3418 txd->read.buffer_addr = htole64(seg_addr); 3419 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); 3420 txd->read.olinfo_status = htole32(olinfo_status); 3421 if (++i == txr->num_tx_desc) 3422 i = 0; 3423 tx_buf->m_head = NULL; 3424 } 3425 3426 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n")); 3427 txr->next_avail_desc = i; 3428 txr->tx_avail -= nsegs; 3429 3430 tx_buf->m_head = m_head; 3431 tx_buf_mapped->map = tx_buf->map; 3432 tx_buf->map = map; 3433 3434 /* 3435 * Last Descriptor of Packet needs End Of Packet (EOP) 3436 */ 3437 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs); 3438 3439 /* 3440 * Defer TDT updating until enough descriptors are set up 3441 */ 3442 *idx = i; 3443 #ifdef IGB_TSS_DEBUG 3444 ++txr->tx_packets; 3445 #endif 3446 3447 return 0; 3448 } 3449 3450 static void 3451 igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 3452 { 3453 struct igb_softc *sc = ifp->if_softc; 3454 struct igb_tx_ring *txr = ifsq_get_priv(ifsq); 3455 struct mbuf *m_head; 3456 int idx = -1, nsegs = 0; 3457 3458 KKASSERT(txr->ifsq == ifsq); 3459 ASSERT_SERIALIZED(&txr->tx_serialize); 3460 3461 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 3462 return; 3463 3464 if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) { 3465 ifsq_purge(ifsq); 3466 return; 3467 } 3468 3469 while (!ifsq_is_empty(ifsq)) { 3470 if (txr->tx_avail <= IGB_MAX_SCATTER + IGB_TX_RESERVED) { 3471 ifsq_set_oactive(ifsq); 3472 /* Set watchdog on */ 3473 txr->tx_watchdog.wd_timer = 5; 3474 break; 3475 } 3476 3477 m_head = ifsq_dequeue(ifsq); 3478 if (m_head == NULL) 3479 break; 3480 3481 if (igb_encap(txr, &m_head, &nsegs, &idx)) { 3482 IFNET_STAT_INC(ifp, oerrors, 1); 3483 continue; 3484 } 3485 3486 /* 3487 * TX interrupts are aggressively aggregated, so increasing 3488 * opackets at TX interrupt time will make the opackets 3489 * statistics vastly inaccurate; we do the opackets increment 3490 * now.
3491 */ 3492 IFNET_STAT_INC(ifp, opackets, 1); 3493 3494 if (nsegs >= txr->wreg_nsegs) { 3495 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); 3496 idx = -1; 3497 nsegs = 0; 3498 } 3499 3500 /* Send a copy of the frame to the BPF listener */ 3501 ETHER_BPF_MTAP(ifp, m_head); 3502 } 3503 if (idx >= 0) 3504 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); 3505 } 3506 3507 static void 3508 igb_watchdog(struct ifaltq_subque *ifsq) 3509 { 3510 struct igb_tx_ring *txr = ifsq_get_priv(ifsq); 3511 struct ifnet *ifp = ifsq_get_ifp(ifsq); 3512 struct igb_softc *sc = ifp->if_softc; 3513 int i; 3514 3515 KKASSERT(txr->ifsq == ifsq); 3516 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3517 3518 /* 3519 * If flow control has paused us since the last check, 3520 * it invalidates the watchdog timing, so don't run it. 3521 */ 3522 if (sc->pause_frames) { 3523 sc->pause_frames = 0; 3524 txr->tx_watchdog.wd_timer = 5; 3525 return; 3526 } 3527 3528 if_printf(ifp, "Watchdog timeout -- resetting\n"); 3529 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, 3530 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)), 3531 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me))); 3532 if_printf(ifp, "TX(%d) desc avail = %d, " 3533 "Next TX to Clean = %d\n", 3534 txr->me, txr->tx_avail, txr->next_to_clean); 3535 3536 IFNET_STAT_INC(ifp, oerrors, 1); 3537 sc->watchdog_events++; 3538 3539 igb_init(sc); 3540 for (i = 0; i < sc->tx_ring_inuse; ++i) 3541 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 3542 } 3543 3544 static void 3545 igb_set_eitr(struct igb_softc *sc, int idx, int rate) 3546 { 3547 uint32_t eitr = 0; 3548 3549 if (rate > 0) { 3550 if (sc->hw.mac.type == e1000_82575) { 3551 eitr = 1000000000 / 256 / rate; 3552 /* 3553 * NOTE: 3554 * The documentation is wrong about the 2-bit left shift 3555 */ 3556 } else { 3557 eitr = 1000000 / rate; 3558 eitr <<= IGB_EITR_INTVL_SHIFT; 3559 } 3560 3561 if (eitr == 0) { 3562 /* Don't disable it */ 3563 eitr = 1 << IGB_EITR_INTVL_SHIFT; 3564 } else if (eitr > IGB_EITR_INTVL_MASK) { 3565 /* Don't allow it to be too large */ 3566 eitr = IGB_EITR_INTVL_MASK; 3567 } 3568 } 3569 if (sc->hw.mac.type == e1000_82575) 3570 eitr |= eitr << 16; 3571 else 3572 eitr |= E1000_EITR_CNT_IGNR; 3573 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr); 3574 } 3575 3576 static void 3577 igb_add_intr_rate_sysctl(struct igb_softc *sc, int use, 3578 const char *name, const char *desc) 3579 { 3580 int i; 3581 3582 for (i = 0; i < sc->intr_cnt; ++i) { 3583 if (sc->intr_data[i].intr_use == use) { 3584 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 3585 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), 3586 OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW, 3587 sc, use, igb_sysctl_intr_rate, "I", desc); 3588 break; 3589 } 3590 } 3591 } 3592 3593 static int 3594 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3595 { 3596 struct igb_softc *sc = (void *)arg1; 3597 int use = arg2; 3598 struct ifnet *ifp = &sc->arpcom.ac_if; 3599 int error, rate, i; 3600 struct igb_intr_data *intr; 3601 3602 rate = 0; 3603 for (i = 0; i < sc->intr_cnt; ++i) { 3604 intr = &sc->intr_data[i]; 3605 if (intr->intr_use == use) { 3606 rate = intr->intr_rate; 3607 break; 3608 } 3609 } 3610 3611 error = sysctl_handle_int(oidp, &rate, 0, req); 3612 if (error || req->newptr == NULL) 3613 return error; 3614 if (rate <= 0) 3615 return EINVAL; 3616 3617 ifnet_serialize_all(ifp); 3618 3619 for (i = 0; i < sc->intr_cnt; ++i) { 3620 intr = &sc->intr_data[i]; 3621 if (intr->intr_use == use && intr->intr_rate != rate) { 3622 intr->intr_rate = rate; 3623 if (ifp->if_flags & 
IFF_RUNNING) 3624 igb_set_eitr(sc, i, rate); 3625 } 3626 } 3627 3628 ifnet_deserialize_all(ifp); 3629 3630 return error; 3631 } 3632 3633 static int 3634 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3635 { 3636 struct igb_softc *sc = (void *)arg1; 3637 struct ifnet *ifp = &sc->arpcom.ac_if; 3638 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3639 int error, nsegs; 3640 3641 nsegs = txr->intr_nsegs; 3642 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3643 if (error || req->newptr == NULL) 3644 return error; 3645 if (nsegs <= 0) 3646 return EINVAL; 3647 3648 ifnet_serialize_all(ifp); 3649 3650 if (nsegs >= txr->num_tx_desc - IGB_MAX_SCATTER - IGB_TX_RESERVED) { 3651 error = EINVAL; 3652 } else { 3653 int i; 3654 3655 error = 0; 3656 for (i = 0; i < sc->tx_ring_cnt; ++i) 3657 sc->tx_rings[i].intr_nsegs = nsegs; 3658 } 3659 3660 ifnet_deserialize_all(ifp); 3661 3662 return error; 3663 } 3664 3665 static int 3666 igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3667 { 3668 struct igb_softc *sc = (void *)arg1; 3669 struct ifnet *ifp = &sc->arpcom.ac_if; 3670 int error, nsegs, i; 3671 3672 nsegs = sc->rx_rings[0].wreg_nsegs; 3673 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3674 if (error || req->newptr == NULL) 3675 return error; 3676 3677 ifnet_serialize_all(ifp); 3678 for (i = 0; i < sc->rx_ring_cnt; ++i) 3679 sc->rx_rings[i].wreg_nsegs = nsegs; 3680 ifnet_deserialize_all(ifp); 3681 3682 return 0; 3683 } 3684 3685 static int 3686 igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3687 { 3688 struct igb_softc *sc = (void *)arg1; 3689 struct ifnet *ifp = &sc->arpcom.ac_if; 3690 int error, nsegs, i; 3691 3692 nsegs = sc->tx_rings[0].wreg_nsegs; 3693 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3694 if (error || req->newptr == NULL) 3695 return error; 3696 3697 ifnet_serialize_all(ifp); 3698 for (i = 0; i < sc->tx_ring_cnt; ++i) 3699 sc->tx_rings[i].wreg_nsegs = nsegs; 3700 ifnet_deserialize_all(ifp); 3701 3702 return 0; 3703 } 3704 3705 #ifdef IFPOLL_ENABLE 3706 3707 static int 3708 igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 3709 { 3710 struct igb_softc *sc = (void *)arg1; 3711 struct ifnet *ifp = &sc->arpcom.ac_if; 3712 int error, off; 3713 3714 off = sc->rx_npoll_off; 3715 error = sysctl_handle_int(oidp, &off, 0, req); 3716 if (error || req->newptr == NULL) 3717 return error; 3718 if (off < 0) 3719 return EINVAL; 3720 3721 ifnet_serialize_all(ifp); 3722 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) { 3723 error = EINVAL; 3724 } else { 3725 error = 0; 3726 sc->rx_npoll_off = off; 3727 } 3728 ifnet_deserialize_all(ifp); 3729 3730 return error; 3731 } 3732 3733 static int 3734 igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS) 3735 { 3736 struct igb_softc *sc = (void *)arg1; 3737 struct ifnet *ifp = &sc->arpcom.ac_if; 3738 int error, off; 3739 3740 off = sc->tx_npoll_off; 3741 error = sysctl_handle_int(oidp, &off, 0, req); 3742 if (error || req->newptr == NULL) 3743 return error; 3744 if (off < 0) 3745 return EINVAL; 3746 3747 ifnet_serialize_all(ifp); 3748 if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) { 3749 error = EINVAL; 3750 } else { 3751 error = 0; 3752 sc->tx_npoll_off = off; 3753 } 3754 ifnet_deserialize_all(ifp); 3755 3756 return error; 3757 } 3758 3759 #endif /* IFPOLL_ENABLE */ 3760 3761 static void 3762 igb_init_intr(struct igb_softc *sc) 3763 { 3764 int i; 3765 3766 igb_set_intr_mask(sc); 3767 3768 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) 3769 igb_init_unshared_intr(sc); 3770 3771 for (i = 0; i < sc->intr_cnt; ++i) 3772 igb_set_eitr(sc, i, sc->intr_data[i].intr_rate); 3773 } 3774 
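/*
 * Worked example of the EITR interval math that igb_init_intr() above
 * applies per vector via igb_set_eitr().  This is an illustrative
 * sketch only, not driver code: the helper name is made up, and the
 * shift value 2 used for IGB_EITR_INTVL_SHIFT is an assumption here
 * rather than a value taken from this file.
 */
#if 0
static uint32_t
igb_eitr_interval_sketch(boolean_t is_82575, int rate)
{
	uint32_t eitr;

	if (rate <= 0)
		return 0;	/* 0 interval: no throttling */
	if (is_82575) {
		/* 82575 counts the interval in 256ns units. */
		eitr = 1000000000 / 256 / rate;
	} else {
		/* Later parts take microseconds in the INTVL field. */
		eitr = 1000000 / rate;
		eitr <<= 2;	/* assumed IGB_EITR_INTVL_SHIFT */
	}
	/* e.g. rate == 8000: 488 on 82575, 125 << 2 == 500 on later parts */
	return eitr;
}
#endif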
3775 static void 3776 igb_init_unshared_intr(struct igb_softc *sc) 3777 { 3778 struct e1000_hw *hw = &sc->hw; 3779 const struct igb_rx_ring *rxr; 3780 const struct igb_tx_ring *txr; 3781 uint32_t ivar, index; 3782 int i; 3783 3784 /* 3785 * Enable extended mode 3786 */ 3787 if (sc->hw.mac.type != e1000_82575) { 3788 uint32_t gpie; 3789 int ivar_max; 3790 3791 gpie = E1000_GPIE_NSICR; 3792 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3793 gpie |= E1000_GPIE_MSIX_MODE | 3794 E1000_GPIE_EIAME | 3795 E1000_GPIE_PBA; 3796 } 3797 E1000_WRITE_REG(hw, E1000_GPIE, gpie); 3798 3799 /* 3800 * Clear IVARs 3801 */ 3802 switch (sc->hw.mac.type) { 3803 case e1000_82576: 3804 ivar_max = IGB_MAX_IVAR_82576; 3805 break; 3806 3807 case e1000_82580: 3808 ivar_max = IGB_MAX_IVAR_82580; 3809 break; 3810 3811 case e1000_i350: 3812 ivar_max = IGB_MAX_IVAR_I350; 3813 break; 3814 3815 case e1000_i354: 3816 ivar_max = IGB_MAX_IVAR_I354; 3817 break; 3818 3819 case e1000_vfadapt: 3820 case e1000_vfadapt_i350: 3821 ivar_max = IGB_MAX_IVAR_VF; 3822 break; 3823 3824 case e1000_i210: 3825 ivar_max = IGB_MAX_IVAR_I210; 3826 break; 3827 3828 case e1000_i211: 3829 ivar_max = IGB_MAX_IVAR_I211; 3830 break; 3831 3832 default: 3833 panic("unknown mac type %d\n", sc->hw.mac.type); 3834 } 3835 for (i = 0; i < ivar_max; ++i) 3836 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 3837 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3838 } else { 3839 uint32_t tmp; 3840 3841 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 3842 ("82575 w/ MSI-X")); 3843 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 3844 tmp |= E1000_CTRL_EXT_IRCA; 3845 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 3846 } 3847 3848 /* 3849 * Map TX/RX interrupts to EICR 3850 */ 3851 switch (sc->hw.mac.type) { 3852 case e1000_82580: 3853 case e1000_i350: 3854 case e1000_i354: 3855 case e1000_vfadapt: 3856 case e1000_vfadapt_i350: 3857 case e1000_i210: 3858 case e1000_i211: 3859 /* RX entries */ 3860 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3861 rxr = &sc->rx_rings[i]; 3862 3863 index = i >> 1; 3864 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3865 3866 if (i & 1) { 3867 ivar &= 0xff00ffff; 3868 ivar |= 3869 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 3870 } else { 3871 ivar &= 0xffffff00; 3872 ivar |= 3873 (rxr->rx_intr_vec | E1000_IVAR_VALID); 3874 } 3875 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3876 } 3877 /* TX entries */ 3878 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3879 txr = &sc->tx_rings[i]; 3880 3881 index = i >> 1; 3882 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3883 3884 if (i & 1) { 3885 ivar &= 0x00ffffff; 3886 ivar |= 3887 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 3888 } else { 3889 ivar &= 0xffff00ff; 3890 ivar |= 3891 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 3892 } 3893 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3894 } 3895 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3896 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 3897 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3898 } 3899 break; 3900 3901 case e1000_82576: 3902 /* RX entries */ 3903 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3904 rxr = &sc->rx_rings[i]; 3905 3906 index = i & 0x7; /* Each IVAR has two entries */ 3907 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3908 3909 if (i < 8) { 3910 ivar &= 0xffffff00; 3911 ivar |= 3912 (rxr->rx_intr_vec | E1000_IVAR_VALID); 3913 } else { 3914 ivar &= 0xff00ffff; 3915 ivar |= 3916 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 3917 } 3918 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3919 } 3920 /* TX entries */ 3921 for 
(i = 0; i < sc->tx_ring_inuse; ++i) { 3922 txr = &sc->tx_rings[i]; 3923 3924 index = i & 0x7; /* Each IVAR has two entries */ 3925 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3926 3927 if (i < 8) { 3928 ivar &= 0xffff00ff; 3929 ivar |= 3930 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 3931 } else { 3932 ivar &= 0x00ffffff; 3933 ivar |= 3934 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 3935 } 3936 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3937 } 3938 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3939 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 3940 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3941 } 3942 break; 3943 3944 case e1000_82575: 3945 /* 3946 * Enable necessary interrupt bits. 3947 * 3948 * The name of the register is confusing; in addition to 3949 * configuring the first vector of MSI-X, it also configures 3950 * which bits of EICR could be set by the hardware even when 3951 * MSI or line interrupt is used; it thus controls interrupt 3952 * generation. It MUST be configured explicitly; the default 3953 * value mentioned in the datasheet is wrong: RX queue0 and 3954 * TX queue0 are NOT enabled by default. 3955 */ 3956 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask); 3957 break; 3958 3959 default: 3960 panic("unknown mac type %d\n", sc->hw.mac.type); 3961 } 3962 } 3963 3964 static int 3965 igb_setup_intr(struct igb_softc *sc) 3966 { 3967 int i; 3968 3969 for (i = 0; i < sc->intr_cnt; ++i) { 3970 struct igb_intr_data *intr = &sc->intr_data[i]; 3971 int error; 3972 3973 error = bus_setup_intr_descr(sc->dev, intr->intr_res, 3974 INTR_MPSAFE, intr->intr_func, intr->intr_funcarg, 3975 &intr->intr_hand, intr->intr_serialize, intr->intr_desc); 3976 if (error) { 3977 device_printf(sc->dev, "can't setup %dth intr\n", i); 3978 igb_teardown_intr(sc, i); 3979 return error; 3980 } 3981 } 3982 return 0; 3983 } 3984 3985 static void 3986 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_vec0, int intr_vecmax) 3987 { 3988 if (txr->sc->hw.mac.type == e1000_82575) { 3989 txr->tx_intr_vec = 0; /* unused */ 3990 switch (txr->me) { 3991 case 0: 3992 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0; 3993 break; 3994 case 1: 3995 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1; 3996 break; 3997 case 2: 3998 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2; 3999 break; 4000 case 3: 4001 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3; 4002 break; 4003 default: 4004 panic("unsupported # of TX ring, %d\n", txr->me); 4005 } 4006 } else { 4007 int intr_vec = *intr_vec0; 4008 4009 txr->tx_intr_vec = intr_vec % intr_vecmax; 4010 txr->tx_intr_mask = 1 << txr->tx_intr_vec; 4011 4012 *intr_vec0 = intr_vec + 1; 4013 } 4014 } 4015 4016 static void 4017 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_vec0, int intr_vecmax) 4018 { 4019 if (rxr->sc->hw.mac.type == e1000_82575) { 4020 rxr->rx_intr_vec = 0; /* unused */ 4021 switch (rxr->me) { 4022 case 0: 4023 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0; 4024 break; 4025 case 1: 4026 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1; 4027 break; 4028 case 2: 4029 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2; 4030 break; 4031 case 3: 4032 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3; 4033 break; 4034 default: 4035 panic("unsupported # of RX ring, %d\n", rxr->me); 4036 } 4037 } else { 4038 int intr_vec = *intr_vec0; 4039 4040 rxr->rx_intr_vec = intr_vec % intr_vecmax; 4041 rxr->rx_intr_mask = 1 << rxr->rx_intr_vec; 4042 4043 *intr_vec0 = intr_vec + 1; 4044 } 4045 } 4046 4047 static void 4048 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 4049 { 4050 struct igb_softc 
*sc = ifp->if_softc; 4051 4052 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz); 4053 } 4054 4055 static void 4056 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4057 { 4058 struct igb_softc *sc = ifp->if_softc; 4059 4060 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz); 4061 } 4062 4063 static int 4064 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4065 { 4066 struct igb_softc *sc = ifp->if_softc; 4067 4068 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt, 4069 slz); 4070 } 4071 4072 #ifdef INVARIANTS 4073 4074 static void 4075 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 4076 boolean_t serialized) 4077 { 4078 struct igb_softc *sc = ifp->if_softc; 4079 4080 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt, 4081 slz, serialized); 4082 } 4083 4084 #endif /* INVARIANTS */ 4085 4086 static void 4087 igb_set_intr_mask(struct igb_softc *sc) 4088 { 4089 int i; 4090 4091 sc->intr_mask = sc->sts_intr_mask; 4092 for (i = 0; i < sc->rx_ring_inuse; ++i) 4093 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask; 4094 for (i = 0; i < sc->tx_ring_inuse; ++i) 4095 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask; 4096 if (bootverbose) { 4097 if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n", 4098 sc->intr_mask); 4099 } 4100 } 4101 4102 static int 4103 igb_alloc_intr(struct igb_softc *sc) 4104 { 4105 struct igb_intr_data *intr; 4106 int i, intr_vec, intr_vecmax; 4107 u_int intr_flags; 4108 4109 igb_alloc_msix(sc); 4110 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 4111 goto done; 4112 4113 if (sc->intr_data != NULL) 4114 kfree(sc->intr_data, M_DEVBUF); 4115 4116 sc->intr_cnt = 1; 4117 sc->intr_data = kmalloc(sizeof(struct igb_intr_data), M_DEVBUF, 4118 M_WAITOK | M_ZERO); 4119 intr = &sc->intr_data[0]; 4120 4121 /* 4122 * Allocate MSI/legacy interrupt resource 4123 */ 4124 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable, 4125 &intr->intr_rid, &intr_flags); 4126 4127 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 4128 int unshared; 4129 4130 unshared = device_getenv_int(sc->dev, "irq.unshared", 0); 4131 if (!unshared) { 4132 sc->flags |= IGB_FLAG_SHARED_INTR; 4133 if (bootverbose) 4134 device_printf(sc->dev, "IRQ shared\n"); 4135 } else { 4136 intr_flags &= ~RF_SHAREABLE; 4137 if (bootverbose) 4138 device_printf(sc->dev, "IRQ unshared\n"); 4139 } 4140 } 4141 4142 intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4143 &intr->intr_rid, intr_flags); 4144 if (intr->intr_res == NULL) { 4145 device_printf(sc->dev, "Unable to allocate bus resource: " 4146 "interrupt\n"); 4147 return ENXIO; 4148 } 4149 4150 intr->intr_serialize = &sc->main_serialize; 4151 intr->intr_cpuid = rman_get_cpuid(intr->intr_res); 4152 intr->intr_func = (sc->flags & IGB_FLAG_SHARED_INTR) ? 
static int
igb_alloc_intr(struct igb_softc *sc)
{
	struct igb_intr_data *intr;
	int i, intr_vec, intr_vecmax;
	u_int intr_flags;

	igb_alloc_msix(sc);
	if (sc->intr_type == PCI_INTR_TYPE_MSIX)
		goto done;

	if (sc->intr_data != NULL)
		kfree(sc->intr_data, M_DEVBUF);

	sc->intr_cnt = 1;
	sc->intr_data = kmalloc(sizeof(struct igb_intr_data), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	intr = &sc->intr_data[0];

	/*
	 * Allocate MSI/legacy interrupt resource
	 */
	sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable,
	    &intr->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= IGB_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(sc->dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(sc->dev, "IRQ unshared\n");
		}
	}

	intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &intr->intr_rid, intr_flags);
	if (intr->intr_res == NULL) {
		device_printf(sc->dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return ENXIO;
	}

	intr->intr_serialize = &sc->main_serialize;
	intr->intr_cpuid = rman_get_cpuid(intr->intr_res);
	intr->intr_func = (sc->flags & IGB_FLAG_SHARED_INTR) ?
	    igb_intr_shared : igb_intr;
	intr->intr_funcarg = sc;
	intr->intr_rate = IGB_INTR_RATE;
	intr->intr_use = IGB_INTR_USE_RXTX;

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_rings[i].tx_intr_cpuid = intr->intr_cpuid;

	/*
	 * Setup MSI/legacy interrupt mask
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		intr_vecmax = IGB_MAX_TXRXINT_82575;
		break;

	case e1000_82576:
		intr_vecmax = IGB_MAX_TXRXINT_82576;
		break;

	case e1000_82580:
		intr_vecmax = IGB_MAX_TXRXINT_82580;
		break;

	case e1000_i350:
		intr_vecmax = IGB_MAX_TXRXINT_I350;
		break;

	case e1000_i354:
		intr_vecmax = IGB_MAX_TXRXINT_I354;
		break;

	case e1000_i210:
		intr_vecmax = IGB_MAX_TXRXINT_I210;
		break;

	case e1000_i211:
		intr_vecmax = IGB_MAX_TXRXINT_I211;
		break;

	default:
		intr_vecmax = IGB_MIN_TXRXINT;
		break;
	}
	intr_vec = 0;
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_set_txintr_mask(&sc->tx_rings[i], &intr_vec, intr_vecmax);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_set_rxintr_mask(&sc->rx_rings[i], &intr_vec, intr_vecmax);
	sc->sts_intr_mask = E1000_EICR_OTHER;
done:
	igb_set_ring_inuse(sc, FALSE);
	igb_set_intr_mask(sc);
	return 0;
}

static void
igb_free_intr(struct igb_softc *sc)
{
	if (sc->intr_data == NULL)
		return;

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		struct igb_intr_data *intr = &sc->intr_data[0];

		KKASSERT(sc->intr_cnt == 1);
		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (sc->intr_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->dev);

		kfree(sc->intr_data, M_DEVBUF);
	} else {
		igb_free_msix(sc, TRUE);
	}
}

static void
igb_teardown_intr(struct igb_softc *sc, int intr_cnt)
{
	int i;

	if (sc->intr_data == NULL)
		return;

	for (i = 0; i < intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];

		bus_teardown_intr(sc->dev, intr->intr_res, intr->intr_hand);
	}
}
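/*
 * MSI-X allocation policy: with enough vectors each RX ring and each
 * TX ring gets a private vector ("independent"); otherwise an RX/TX
 * ring pair shares one vector ("aggregate").  One additional vector
 * is always reserved for link status changes.
 */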
static void
igb_alloc_msix(struct igb_softc *sc)
{
	int msix_enable, msix_cnt, msix_cnt2, alloc_cnt;
	int i, x, error;
	int offset, offset_def, agg_rxtx, ring_max;
	struct igb_intr_data *intr;
	boolean_t aggregate, setup = FALSE;

	/*
	 * Don't enable MSI-X on 82575, see:
	 * 82575 specification update errata #25
	 */
	if (sc->hw.mac.type == e1000_82575)
		return;

	/* Don't enable MSI-X on VF */
	if (sc->vf_ifp)
		return;

	msix_enable = device_getenv_int(sc->dev, "msix.enable",
	    igb_msix_enable);
	if (!msix_enable)
		return;

	msix_cnt = pci_msix_count(sc->dev);
#ifdef IGB_MSIX_DEBUG
	msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt);
#endif
	if (msix_cnt <= 1) {
		/* One MSI-X model does not make sense */
		return;
	}

	/*
	 * Round the usable vector count down to the largest power
	 * of 2 that is <= msix_cnt.
	 */
	i = 0;
	while ((1 << (i + 1)) <= msix_cnt)
		++i;
	msix_cnt2 = 1 << i;

	if (bootverbose) {
		device_printf(sc->dev, "MSI-X count %d/%d\n",
		    msix_cnt2, msix_cnt);
	}

	KKASSERT(msix_cnt2 <= msix_cnt);
	if (msix_cnt == msix_cnt2) {
		/* We need at least one MSI-X for link status */
		msix_cnt2 >>= 1;
		if (msix_cnt2 <= 1) {
			/* One MSI-X for RX/TX does not make sense */
			device_printf(sc->dev, "not enough MSI-X for TX/RX, "
			    "MSI-X count %d/%d\n", msix_cnt2, msix_cnt);
			return;
		}
		KKASSERT(msix_cnt > msix_cnt2);

		if (bootverbose) {
			device_printf(sc->dev, "MSI-X count fixup %d/%d\n",
			    msix_cnt2, msix_cnt);
		}
	}

	sc->rx_ring_msix = sc->rx_ring_cnt;
	if (sc->rx_ring_msix > msix_cnt2)
		sc->rx_ring_msix = msix_cnt2;

	sc->tx_ring_msix = sc->tx_ring_cnt;
	if (sc->tx_ring_msix > msix_cnt2)
		sc->tx_ring_msix = msix_cnt2;

	ring_max = sc->rx_ring_msix;
	if (ring_max < sc->tx_ring_msix)
		ring_max = sc->tx_ring_msix;

	/* Allow user to force independent RX/TX MSI-X handling */
	agg_rxtx = device_getenv_int(sc->dev, "msix.agg_rxtx",
	    igb_msix_agg_rxtx);

	if (!agg_rxtx && msix_cnt >= sc->tx_ring_msix + sc->rx_ring_msix + 1) {
		/*
		 * Independent TX/RX MSI-X
		 */
		aggregate = FALSE;
		if (bootverbose)
			device_printf(sc->dev, "independent TX/RX MSI-X\n");
		alloc_cnt = sc->tx_ring_msix + sc->rx_ring_msix;
	} else {
		/*
		 * Aggregate TX/RX MSI-X
		 */
		aggregate = TRUE;
		if (bootverbose)
			device_printf(sc->dev, "aggregate TX/RX MSI-X\n");
		alloc_cnt = msix_cnt2;
		if (alloc_cnt > ring_max)
			alloc_cnt = ring_max;
		KKASSERT(alloc_cnt >= sc->rx_ring_msix &&
		    alloc_cnt >= sc->tx_ring_msix);
	}
	++alloc_cnt;	/* For link status */

	if (bootverbose) {
		device_printf(sc->dev, "MSI-X alloc %d, "
		    "RX ring %d, TX ring %d\n", alloc_cnt,
		    sc->rx_ring_msix, sc->tx_ring_msix);
	}

	sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR);
	sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_mem_rid, RF_ACTIVE);
	if (sc->msix_mem_res == NULL) {
		sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR_ALT);
		sc->msix_mem_res = bus_alloc_resource_any(sc->dev,
		    SYS_RES_MEMORY, &sc->msix_mem_rid, RF_ACTIVE);
		if (sc->msix_mem_res == NULL) {
			device_printf(sc->dev, "Unable to map MSI-X table\n");
			return;
		}
	}

	sc->intr_cnt = alloc_cnt;
	sc->intr_data = kmalloc(sizeof(struct igb_intr_data) * sc->intr_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (x = 0; x < sc->intr_cnt; ++x) {
		intr = &sc->intr_data[x];
		intr->intr_rid = -1;
		intr->intr_rate = IGB_INTR_RATE;
	}
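	/*
	 * Map rings to CPUs: ring i is serviced on CPU (i + offset).
	 * The default offset staggers multiple devices across CPUs;
	 * it can be overridden by the msix.rxoff/msix.txoff/msix.off
	 * tunables and must be a multiple of the ring count.
	 */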
	x = 0;
	if (!aggregate) {
		/*
		 * RX rings
		 */
		if (sc->rx_ring_msix == ncpus2) {
			offset = 0;
		} else {
			offset_def = (sc->rx_ring_msix *
			    device_get_unit(sc->dev)) % ncpus2;

			offset = device_getenv_int(sc->dev,
			    "msix.rxoff", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->rx_ring_msix != 0) {
				device_printf(sc->dev,
				    "invalid msix.rxoff %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}
		igb_msix_rx_conf(sc, 0, &x, offset);

		/*
		 * TX rings
		 */
		if (sc->tx_ring_msix == ncpus2) {
			offset = 0;
		} else {
			offset_def = (sc->tx_ring_msix *
			    device_get_unit(sc->dev)) % ncpus2;

			offset = device_getenv_int(sc->dev,
			    "msix.txoff", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->tx_ring_msix != 0) {
				device_printf(sc->dev,
				    "invalid msix.txoff %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}
		igb_msix_tx_conf(sc, 0, &x, offset);
	} else {
		int ring_agg;

		ring_agg = sc->rx_ring_msix;
		if (ring_agg > sc->tx_ring_msix)
			ring_agg = sc->tx_ring_msix;

		if (ring_max == ncpus2) {
			offset = 0;
		} else {
			offset_def = (ring_max * device_get_unit(sc->dev)) %
			    ncpus2;

			offset = device_getenv_int(sc->dev, "msix.off",
			    offset_def);
			if (offset >= ncpus2 || offset % ring_max != 0) {
				device_printf(sc->dev,
				    "invalid msix.off %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}

		for (i = 0; i < ring_agg; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			KKASSERT(x < sc->intr_cnt);
			rxr->rx_intr_vec = x;
			rxr->rx_intr_mask = 1 << rxr->rx_intr_vec;
			rxr->rx_txr = txr;
			txr->tx_intr_vec = rxr->rx_intr_vec;
			txr->tx_intr_mask = rxr->rx_intr_mask;

			intr = &sc->intr_data[x++];

			intr->intr_serialize = &rxr->rx_serialize;
			intr->intr_func = igb_msix_rxtx;
			intr->intr_funcarg = rxr;
			intr->intr_use = IGB_INTR_USE_RXTX;

			intr->intr_cpuid = i + offset;
			KKASSERT(intr->intr_cpuid < ncpus2);
			txr->tx_intr_cpuid = intr->intr_cpuid;

			ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
			    "%s rxtx%d", device_get_nameunit(sc->dev), i);
			intr->intr_desc = intr->intr_desc0;
		}

		if (ring_agg != ring_max) {
			if (ring_max == sc->tx_ring_msix)
				igb_msix_tx_conf(sc, i, &x, offset);
			else
				igb_msix_rx_conf(sc, i, &x, offset);
		}
	}

	/*
	 * Link status
	 */
	KKASSERT(x < sc->intr_cnt);
	sc->sts_msix_vec = x;
	sc->sts_intr_mask = 1 << sc->sts_msix_vec;

	intr = &sc->intr_data[x++];

	intr->intr_serialize = &sc->main_serialize;
	intr->intr_func = igb_msix_status;
	intr->intr_funcarg = sc;
	intr->intr_cpuid = 0;
	intr->intr_use = IGB_INTR_USE_STATUS;

	ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s sts",
	    device_get_nameunit(sc->dev));
	intr->intr_desc = intr->intr_desc0;

	KKASSERT(x == sc->intr_cnt);

	error = pci_setup_msix(sc->dev);
	if (error) {
		device_printf(sc->dev, "Setup MSI-X failed\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->intr_cnt; ++i) {
		intr = &sc->intr_data[i];

		error = pci_alloc_msix_vector(sc->dev, i, &intr->intr_rid,
		    intr->intr_cpuid);
		if (error) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d on cpu%d\n", i,
			    intr->intr_cpuid);
			goto back;
		}

		intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &intr->intr_rid, RF_ACTIVE);
		if (intr->intr_res == NULL) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d resource\n", i);
			error = ENOMEM;
			goto back;
		}
	}

	pci_enable_msix(sc->dev);
	sc->intr_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		igb_free_msix(sc, setup);
}

static void
igb_free_msix(struct igb_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->intr_cnt > 1);

	for (i = 0; i < sc->intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];

		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (intr->intr_rid >= 0)
			pci_release_msix_vector(sc->dev, intr->intr_rid);
	}
	if (setup)
		pci_teardown_msix(sc->dev);

	sc->intr_cnt = 0;
	kfree(sc->intr_data, M_DEVBUF);
	sc->intr_data = NULL;
}
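/*
 * MSI-X interrupt handlers.  Each handler runs under its ring's
 * serializer and, once done, sets its own bit in EIMS to re-enable
 * interrupt delivery for that vector.
 */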
static void
igb_msix_rx(void *arg)
{
	struct igb_rx_ring *rxr = arg;

	ASSERT_SERIALIZED(&rxr->rx_serialize);
	igb_rxeof(rxr, -1);

	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_msix_tx(void *arg)
{
	struct igb_tx_ring *txr = arg;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	igb_txeof(txr, *(txr->tx_hdr));
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);

	E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
}

static void
igb_msix_status(void *arg)
{
	struct igb_softc *sc = arg;
	uint32_t icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
}

static void
igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
{
	sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling);
	sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling);
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if,
		    "RX rings %d/%d, TX rings %d/%d\n",
		    sc->rx_ring_inuse, sc->rx_ring_cnt,
		    sc->tx_ring_inuse, sc->tx_ring_cnt);
	}
}

static int
igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWRSS(sc))
		return 1;

	if (polling)
		return sc->rx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING_RSS;
	else
		return sc->rx_ring_msix;
}

static int
igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWTSS(sc))
		return 1;

	if (polling)
		return sc->tx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING;
	else
		return sc->tx_ring_msix;
}

static int
igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) {
		struct ip *ip;

		ip = mtodoff(m, struct ip *, hoff);
		ip->ip_len = 0;
	}

	return 0;
}
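/*
 * Fill an advanced TX context descriptor for TSO: the header lengths
 * go into vlan_macip_lens, the TCP/IPv4 command bits into
 * type_tucmd_mlhl, and the MSS plus L4 header length into
 * mss_l4len_idx.  One TX descriptor is consumed.
 */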
static void
igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen)
{
	struct e1000_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	int hoff, ctxd, iphlen, thoff;

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;

	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	if (m->m_flags & M_VLANTAG) {
		uint16_t vlantag;

		vlantag = htole16(m->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= iphlen;

	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;

	mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT);

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;

	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	*hlen = hoff + iphlen + thoff;
}

static void
igb_setup_serialize(struct igb_softc *sc)
{
	int i = 0, j;

	/* Main + RX + TX */
	sc->serialize_cnt = 1 + sc->rx_ring_cnt + sc->tx_ring_cnt;
	sc->serializes =
	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializes
	 *
	 * NOTE: Order is critical
	 */

	KKASSERT(i < sc->serialize_cnt);
	sc->serializes[i++] = &sc->main_serialize;

	for (j = 0; j < sc->rx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
	}

	for (j = 0; j < sc->tx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
	}

	KKASSERT(i == sc->serialize_cnt);
}
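/*
 * Per-ring MSI-X configuration helpers: assign a vector and EICR mask
 * to each remaining RX/TX ring, bind the vector to a CPU, and record
 * an interrupt description for the ring.
 */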
static void
igb_msix_rx_conf(struct igb_softc *sc, int i, int *x0, int offset)
{
	int x = *x0;

	for (; i < sc->rx_ring_msix; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		struct igb_intr_data *intr;

		KKASSERT(x < sc->intr_cnt);
		rxr->rx_intr_vec = x;
		rxr->rx_intr_mask = 1 << rxr->rx_intr_vec;

		intr = &sc->intr_data[x++];

		intr->intr_serialize = &rxr->rx_serialize;
		intr->intr_func = igb_msix_rx;
		intr->intr_funcarg = rxr;
		intr->intr_rate = IGB_MSIX_RX_RATE;
		intr->intr_use = IGB_INTR_USE_RX;

		intr->intr_cpuid = i + offset;
		KKASSERT(intr->intr_cpuid < ncpus2);

		ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
		    "%s rx%d", device_get_nameunit(sc->dev), i);
		intr->intr_desc = intr->intr_desc0;
	}
	*x0 = x;
}

static void
igb_msix_tx_conf(struct igb_softc *sc, int i, int *x0, int offset)
{
	int x = *x0;

	for (; i < sc->tx_ring_msix; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		struct igb_intr_data *intr;

		KKASSERT(x < sc->intr_cnt);
		txr->tx_intr_vec = x;
		txr->tx_intr_mask = 1 << txr->tx_intr_vec;

		intr = &sc->intr_data[x++];

		intr->intr_serialize = &txr->tx_serialize;
		intr->intr_func = igb_msix_tx;
		intr->intr_funcarg = txr;
		intr->intr_rate = IGB_MSIX_TX_RATE;
		intr->intr_use = IGB_INTR_USE_TX;

		intr->intr_cpuid = i + offset;
		KKASSERT(intr->intr_cpuid < ncpus2);
		txr->tx_intr_cpuid = intr->intr_cpuid;

		ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
		    "%s tx%d", device_get_nameunit(sc->dev), i);
		intr->intr_desc = intr->intr_desc0;
	}
	*x0 = x;
}

static void
igb_msix_rxtx(void *arg)
{
	struct igb_rx_ring *rxr = arg;
	struct igb_tx_ring *txr;
	int hdr;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	igb_rxeof(rxr, -1);

	/*
	 * NOTE:
	 * Since next_to_clean is only changed by igb_txeof(),
	 * which is called only in interrupt handler, the
	 * check w/o holding tx serializer is MPSAFE.
	 */
	txr = rxr->rx_txr;
	hdr = *(txr->tx_hdr);
	if (hdr != txr->next_to_clean) {
		lwkt_serialize_enter(&txr->tx_serialize);
		igb_txeof(txr, hdr);
		if (!ifsq_is_empty(txr->ifsq))
			ifsq_devstart(txr->ifsq);
		lwkt_serialize_exit(&txr->tx_serialize);
	}

	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling)
{
	if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
		sc->timer_cpuid = 0; /* XXX fixed */
	else
		sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res);
}
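/*
 * Configure DMA coalescing on i350 and later MACs; i211 has no DMA
 * coalescing and on 82580 it is simply disabled.  The flow control
 * and DMA coalescing thresholds below are derived from the packet
 * buffer allocation (pba) and the maximum frame size.
 */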
static void
igb_init_dmac(struct igb_softc *sc, uint32_t pba)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (hw->mac.type == e1000_i211)
		return;

	if (hw->mac.type > e1000_82580) {
		uint32_t dmac;
		uint16_t hwm;

		if (sc->dma_coalesce == 0) { /* Disabling it */
			/*
			 * Clear only the enable bit; writing the raw
			 * complement of E1000_DMACR_DMAC_EN would set
			 * every other bit in the register.
			 */
			reg = E1000_READ_REG(hw, E1000_DMACR);
			reg &= ~E1000_DMACR_DMAC_EN;
			E1000_WRITE_REG(hw, E1000_DMACR, reg);
			return;
		} else {
			if_printf(&sc->arpcom.ac_if,
			    "DMA Coalescing enabled\n");
		}

		/* Set starting threshold */
		E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);

		hwm = 64 * pba - sc->max_frame_size / 16;
		if (hwm < 64 * (pba - 6))
			hwm = 64 * (pba - 6);
		reg = E1000_READ_REG(hw, E1000_FCRTC);
		reg &= ~E1000_FCRTC_RTH_COAL_MASK;
		reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
		    & E1000_FCRTC_RTH_COAL_MASK);
		E1000_WRITE_REG(hw, E1000_FCRTC, reg);

		dmac = pba - sc->max_frame_size / 512;
		if (dmac < pba - 10)
			dmac = pba - 10;
		reg = E1000_READ_REG(hw, E1000_DMACR);
		reg &= ~E1000_DMACR_DMACTHR_MASK;
		reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK);

		/* Transition to L0s or L1 if available */
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

		/*
		 * Check for a 2.5Gb backplane connection before
		 * configuring the watchdog timer: on a 2.5Gb link
		 * the timer field counts in 12.8 usec units, while
		 * on other links it counts in 32 usec units.
		 */
		if (hw->mac.type == e1000_i354) {
			int status = E1000_READ_REG(hw, E1000_STATUS);

			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER))
				reg |= ((sc->dma_coalesce * 5) >> 6);
			else
				reg |= (sc->dma_coalesce >> 5);
		} else {
			reg |= (sc->dma_coalesce >> 5);
		}

		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		if (hw->mac.type == e1000_i350)
			reg |= IGB_DMCTLX_DCFLUSH_DIS;
		/*
		 * On a 2.5Gb connection the TTLX unit is 0.4 usec, so
		 * the 4 usec delay is expressed as 0xA (10 * 0.4 usec);
		 * on other connections the same delay is expressed
		 * as 0x4.
		 */
		if (hw->mac.type == e1000_i354) {
			int status = E1000_READ_REG(hw, E1000_STATUS);

			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER))
				reg |= 0xA;
			else
				reg |= 0x4;
		} else {
			reg |= 0x4;
		}
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* Free space in TX packet buffer to wake from DMA coalescing */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (IGB_TXPBSIZE - (2 * sc->max_frame_size)) >> 6);

		/* Make the low power state decision controlled by DMA coalescing */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		reg &= ~E1000_PCIEMISC_LX_DECISION;
		E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
	} else if (hw->mac.type == e1000_82580) {
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg & ~E1000_PCIEMISC_LX_DECISION);
		E1000_WRITE_REG(hw, E1000_DMACR, 0);
	}
}