/*
 * Copyright (c) 2001-2011, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_igb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/igb/if_igb.h>

#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */

#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id)	\
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }

static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),
	IGB_DEVICE(I350_VF),
	IGB_DEVICE(I210_COPPER),
	IGB_DEVICE(I210_COPPER_IT),
	IGB_DEVICE(I210_COPPER_OEM1),
	IGB_DEVICE(I210_COPPER_FLASHLESS),
	IGB_DEVICE(I210_SERDES_FLASHLESS),
	IGB_DEVICE(I210_FIBER),
	IGB_DEVICE(I210_SERDES),
	IGB_DEVICE(I210_SGMII),
	IGB_DEVICE(I211_COPPER),
	IGB_DEVICE(I354_BACKPLANE_1GBPS),
	IGB_DEVICE(I354_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};
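
/*
 * For reference, IGB_DEVICE(82575EB_COPPER) above expands to
 * { IGB_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER,
 *   "Intel(R) PRO/1000 82575EB_COPPER" },
 * giving igb_probe() a vendor/device ID pair to match on and a
 * human-readable description to report.
 */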

static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
static int	igb_get_rxring_inuse(const struct igb_softc *, boolean_t);
static int	igb_get_txring_inuse(const struct igb_softc *, boolean_t);
static void	igb_set_timer_cpuid(struct igb_softc *, boolean_t);
#ifdef IFPOLL_ENABLE
static int	igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);
static void	igb_setup_serializer(struct igb_softc *);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifaltq_subque *);
static void	igb_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *);
static void	igb_msix_try_alloc(struct igb_softc *);
static void	igb_msix_rx_conf(struct igb_softc *, int, int *, int);
static void	igb_msix_tx_conf(struct igb_softc *, int, int *, int);
static void	igb_msix_free(struct igb_softc *, boolean_t);
static int	igb_msix_setup(struct igb_softc *);
static void	igb_msix_teardown(struct igb_softc *, int);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);
static void	igb_msix_rxtx(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);

static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	DEVMETHOD_END
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_txr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */

static char	igb_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_RXPAUSE;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.txr", &igb_txr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_STR("hw.igb.flow_ctrl", igb_flowctrl, sizeof(igb_flowctrl));

/* i350 specific */
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
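
/*
 * Illustrative only (values are arbitrary examples): the tunables above
 * can be overridden at boot time from /boot/loader.conf, e.g.
 *
 *	hw.igb.rxd="2048"	# RX descriptors per ring
 *	hw.igb.msix.enable="0"	# fall back to MSI/line interrupt
 *	hw.igb.flow_ctrl="rxpause"
 *
 * The set of accepted flow control strings is whatever
 * ifmedia_str2ethfc() understands; "rxpause" mirrors the default above.
 */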

static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* The Ignore Checksum (IXSM) bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}
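
/*
 * A note on igb_rssinfo() above: for E1000_RXDADV_RSSTYPE_IPV4_TCP the
 * hardware has already classified the packet as TCP/IPv4.  For the
 * plain E1000_RXDADV_RSSTYPE_IPV4 type the descriptor does not say
 * which L4 protocol was hashed, so the driver only proceeds when the
 * L4 checksum was computed without error (TCPCS set, TCPE clear);
 * since TCP packets get the IPV4_TCP hash type, what remains here is
 * presumed to be UDP.  toeplitz_hash() derives the mbuf hash used for
 * M_HASH dispatch from the 32-bit hardware RSS hash.
 */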

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, ring_max;
	char flowctrl[IFM_ETH_FC_STRLEN];
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

#ifdef notyet
	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
#endif

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    igb_media_change, igb_media_status);
	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		ring_max = IGB_MAX_RING_82575;
		break;

	case e1000_82576:
		ring_max = IGB_MAX_RING_82576;
		break;

	case e1000_82580:
		ring_max = IGB_MAX_RING_82580;
		break;

	case e1000_i350:
		ring_max = IGB_MAX_RING_I350;
		break;

	case e1000_i354:
		ring_max = IGB_MAX_RING_I354;
		break;

	case e1000_i210:
		ring_max = IGB_MAX_RING_I210;
		break;

	case e1000_i211:
		ring_max = IGB_MAX_RING_I211;
		break;

	default:
		ring_max = IGB_MIN_RING;
		break;
	}

	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max);
#ifdef IGB_RSS_DEBUG
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr_debug", sc->rx_ring_cnt);
#endif
	sc->rx_ring_inuse = sc->rx_ring_cnt;

	sc->tx_ring_cnt = device_getenv_int(dev, "txr", igb_txr);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
#ifdef IGB_TSS_DEBUG
	sc->tx_ring_cnt = device_getenv_int(dev, "txr_debug", sc->tx_ring_cnt);
#endif
	sc->tx_ring_inuse = sc->tx_ring_cnt;
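
	/*
	 * The ring counts above start from the global hw.igb.rxr/txr
	 * tunables; device_getenv_int() presumably lets a per-device
	 * "rxr"/"txr" hint override them, and if_ring_count2() clamps
	 * the result against ring_max and the available CPU count.
	 */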

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    igb_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	if (sc->tx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->tx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->tx_npoll_off = offset;
#endif
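
	/*
	 * The npoll.rxoff/txoff validation above requires the polling
	 * CPU offset to be below ncpus2 and a multiple of the ring
	 * count, the idea presumably being that ring i is polled on
	 * CPU (offset + i) without wrapping past the power-of-2 CPU set.
	 */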

	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/* Setup serializers */
	igb_setup_serializer(sc);

	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		if (sc->hw.phy.media_type == e1000_media_type_copper) {
			if (sc->hw.mac.type == e1000_i354)
				e1000_set_eee_i354(&sc->hw);
			else
				e1000_set_eee_i350(&sc->hw);
		}
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and the MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (sc->hw.mac.type != e1000_i210 && sc->hw.mac.type != e1000_i211 &&
	    e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree; this must come after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif

	/*
	 * Disable interrupt to prevent spurious interrupts (line based
	 * interrupt, MSI or even MSI-X), which had been observed on
	 * several types of LOMs, from being handled.
	 */
	igb_disable_intr(sc);

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		igb_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);
	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
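
/*
 * Illustrative only (interface name assumed to be igb0):
 *
 *	ifconfig igb0 mtu 9000		# SIOCSIFMTU; re-inits when running
 *	ifconfig igb0 -rxcsum		# SIOCSIFCAP; toggles and re-inits
 *
 * MTU and capability changes funnel through igb_ioctl() above; where the
 * hardware needs reprogramming and the interface is running, igb_init()
 * is invoked.
 */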

static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest MAC address; the user may have set a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configure the RX/TX rings in use */
	igb_set_ring_inuse(sc, polling);
	ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
	}

	igb_set_timer_cpuid(sc, polling);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		if (sc->hw.mac.type == e1000_i354)
			e1000_set_eee_i354(&sc->hw);
		else
			e1000_set_eee_i350(&sc->hw);
	}
}

static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;

	case 100:
		/*
		 * Support for 100Mb SFP - these are Fiber
		 * but the media type appears as serdes
		 */
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_100_FX;
		else
			ifmr->ifm_active |= IFM_100_TX;
		break;

	case 1000:
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_1000_SX;
		else
			ifmr->ifm_active |= IFM_1000_T;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}

static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		igb_init(sc);

	return 0;
}

static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	int mcnt = 0;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	if (ifp->if_flags & IFF_ALLMULTI) {
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	} else {
		struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
	}
	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}
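
/*
 * Both igb_disable_promisc() and igb_set_multi() above fall back to
 * "receive all multicast" (RCTL.MPE) once the multicast list reaches
 * MAX_NUM_MULTICAST_ADDRESSES, since the hardware filter can no longer
 * hold an exact entry for every group.
 */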

static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}
1335 "Full Duplex" : "Half Duplex", 1336 flowctrl); 1337 } 1338 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1339 e1000_force_flowctrl(hw, sc->ifm_flowctrl); 1340 sc->link_active = 1; 1341 1342 ifp->if_baudrate = sc->link_speed * 1000000; 1343 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 1344 (thstat & E1000_THSTAT_LINK_THROTTLE)) 1345 if_printf(ifp, "Link: thermal downshift\n"); 1346 /* Delay Link Up for Phy update */ 1347 if ((hw->mac.type == e1000_i210 || 1348 hw->mac.type == e1000_i211) && 1349 hw->phy.id == I210_I_PHY_ID) 1350 msec_delay(IGB_I210_LINK_DELAY); 1351 /* This can sleep */ 1352 ifp->if_link_state = LINK_STATE_UP; 1353 if_link_state_change(ifp); 1354 } else if (!link_check && sc->link_active == 1) { 1355 ifp->if_baudrate = sc->link_speed = 0; 1356 sc->link_duplex = 0; 1357 if (bootverbose) 1358 if_printf(ifp, "Link is Down\n"); 1359 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 1360 (thstat & E1000_THSTAT_PWR_DOWN)) 1361 if_printf(ifp, "Link: thermal shutdown\n"); 1362 sc->link_active = 0; 1363 /* This can sleep */ 1364 ifp->if_link_state = LINK_STATE_DOWN; 1365 if_link_state_change(ifp); 1366 } 1367 } 1368 1369 static void 1370 igb_stop(struct igb_softc *sc) 1371 { 1372 struct ifnet *ifp = &sc->arpcom.ac_if; 1373 int i; 1374 1375 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1376 1377 igb_disable_intr(sc); 1378 1379 callout_stop(&sc->timer); 1380 1381 ifp->if_flags &= ~IFF_RUNNING; 1382 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1383 ifsq_clr_oactive(sc->tx_rings[i].ifsq); 1384 ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog); 1385 sc->tx_rings[i].tx_flags &= ~IGB_TXFLAG_ENABLED; 1386 } 1387 1388 e1000_reset_hw(&sc->hw); 1389 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1390 1391 e1000_led_off(&sc->hw); 1392 e1000_cleanup_led(&sc->hw); 1393 1394 for (i = 0; i < sc->tx_ring_cnt; ++i) 1395 igb_free_tx_ring(&sc->tx_rings[i]); 1396 for (i = 0; i < sc->rx_ring_cnt; ++i) 1397 igb_free_rx_ring(&sc->rx_rings[i]); 1398 } 1399 1400 static void 1401 igb_reset(struct igb_softc *sc) 1402 { 1403 struct ifnet *ifp = &sc->arpcom.ac_if; 1404 struct e1000_hw *hw = &sc->hw; 1405 struct e1000_fc_info *fc = &hw->fc; 1406 uint32_t pba = 0; 1407 uint16_t hwm; 1408 1409 /* Let the firmware know the OS is in control */ 1410 igb_get_hw_control(sc); 1411 1412 /* 1413 * Packet Buffer Allocation (PBA) 1414 * Writing PBA sets the receive portion of the buffer 1415 * the remainder is used for the transmit buffer. 

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high
	 *   water mark.  This allows the receiver to restart by sending
	 *   XON when it has drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
	fc->requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	if (hw->mac.type > e1000_82580 && hw->mac.type != e1000_i211) {
		uint32_t dmac;
		uint32_t reg;

		if (sc->dma_coalesce == 0) {
			/*
			 * Disabled
			 */
			reg = E1000_READ_REG(hw, E1000_DMACR);
			reg &= ~E1000_DMACR_DMAC_EN;
			E1000_WRITE_REG(hw, E1000_DMACR, reg);
			goto reset_out;
		}

		/* Set starting thresholds */
		E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		hwm = 64 * pba - sc->max_frame_size / 16;
		if (hwm < 64 * (pba - 6))
			hwm = 64 * (pba - 6);
		reg = E1000_READ_REG(hw, E1000_FCRTC);
		reg &= ~E1000_FCRTC_RTH_COAL_MASK;
		reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
		    & E1000_FCRTC_RTH_COAL_MASK);
		E1000_WRITE_REG(hw, E1000_FCRTC, reg);

		dmac = pba - sc->max_frame_size / 512;
		if (dmac < pba - 10)
			dmac = pba - 10;
		reg = E1000_READ_REG(hw, E1000_DMACR);
		reg &= ~E1000_DMACR_DMACTHR_MASK;
		reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK);
		/* Transition to L0s or L1 if available.. */
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
		/* timer = value in sc->dma_coalesce in 32usec intervals */
		reg |= (sc->dma_coalesce >> 5);
		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		reg |= 0x80000004;
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* Free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * sc->max_frame_size)) >> 6);

		/* Make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		reg &= ~E1000_PCIEMISC_LX_DECISION;
		E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
		if_printf(ifp, "DMA Coalescing enabled\n");
	} else if (hw->mac.type == e1000_82580) {
		uint32_t reg = E1000_READ_REG(hw, E1000_PCIEMISC);

		E1000_WRITE_REG(hw, E1000_DMACR, 0);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg & ~E1000_PCIEMISC_LX_DECISION);
	}

reset_out:
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif

	ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_rings[0].num_rx_desc;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_mask;
	ifq_set_subq_mask(&ifp->if_snd, 0);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog);
	}
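
	/*
	 * Each TX ring gets its own if_snd subqueue above, bound to the
	 * ring's interrupt CPU and hardware serializer, so that enqueue,
	 * start and TX completion for a given ring tend to stay on one
	 * CPU.
	 */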

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
}

static void
igb_add_sysctl(struct igb_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char node[32];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
		    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");
	} else {
		for (i = 0; i < sc->msix_cnt; ++i) {
			struct igb_msix_data *msix = &sc->msix_data[i];

			ksnprintf(node, sizeof(node), "msix%d_rate", i);
			SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
			    OID_AUTO, node, CTLTYPE_INT | CTLFLAG_RW,
			    msix, 0, igb_sysctl_msix_rate, "I",
			    msix->msix_rate_desc);
		}
	}

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_wreg_nsegs, "I",
	    "# of segments sent before write to hardware register");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_rx_wreg_nsegs, "I",
	    "# of segments received before write to hardware register");
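
	/*
	 * Illustrative usage of the nodes created above, assuming unit 0
	 * and the usual device sysctl tree layout (the exact prefix
	 * depends on where device_get_sysctl_tree() roots it):
	 *
	 *	sysctl dev.igb.0.tx_intr_nsegs=64
	 */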
sizeof(node), "tx%d_pkt", i); 1733 SYSCTL_ADD_ULONG(ctx, 1734 SYSCTL_CHILDREN(tree), OID_AUTO, node, 1735 CTLFLAG_RW, &sc->tx_rings[i].tx_packets, "TXed packets"); 1736 } 1737 #endif 1738 } 1739 1740 static int 1741 igb_alloc_rings(struct igb_softc *sc) 1742 { 1743 int error, i; 1744 1745 /* 1746 * Create top level busdma tag 1747 */ 1748 error = bus_dma_tag_create(NULL, 1, 0, 1749 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1750 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 1751 &sc->parent_tag); 1752 if (error) { 1753 device_printf(sc->dev, "could not create top level DMA tag\n"); 1754 return error; 1755 } 1756 1757 /* 1758 * Allocate TX descriptor rings and buffers 1759 */ 1760 sc->tx_rings = kmalloc_cachealign( 1761 sizeof(struct igb_tx_ring) * sc->tx_ring_cnt, 1762 M_DEVBUF, M_WAITOK | M_ZERO); 1763 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1764 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1765 1766 /* Set up some basics */ 1767 txr->sc = sc; 1768 txr->me = i; 1769 lwkt_serialize_init(&txr->tx_serialize); 1770 1771 error = igb_create_tx_ring(txr); 1772 if (error) 1773 return error; 1774 } 1775 1776 /* 1777 * Allocate RX descriptor rings and buffers 1778 */ 1779 sc->rx_rings = kmalloc_cachealign( 1780 sizeof(struct igb_rx_ring) * sc->rx_ring_cnt, 1781 M_DEVBUF, M_WAITOK | M_ZERO); 1782 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1783 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 1784 1785 /* Set up some basics */ 1786 rxr->sc = sc; 1787 rxr->me = i; 1788 lwkt_serialize_init(&rxr->rx_serialize); 1789 1790 error = igb_create_rx_ring(rxr); 1791 if (error) 1792 return error; 1793 } 1794 1795 return 0; 1796 } 1797 1798 static void 1799 igb_free_rings(struct igb_softc *sc) 1800 { 1801 int i; 1802 1803 if (sc->tx_rings != NULL) { 1804 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1805 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1806 1807 igb_destroy_tx_ring(txr, txr->num_tx_desc); 1808 } 1809 kfree(sc->tx_rings, M_DEVBUF); 1810 } 1811 1812 if (sc->rx_rings != NULL) { 1813 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1814 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 1815 1816 igb_destroy_rx_ring(rxr, rxr->num_rx_desc); 1817 } 1818 kfree(sc->rx_rings, M_DEVBUF); 1819 } 1820 } 1821 1822 static int 1823 igb_create_tx_ring(struct igb_tx_ring *txr) 1824 { 1825 int tsize, error, i, ntxd; 1826 1827 /* 1828 * Validate number of transmit descriptors. It must not exceed 1829 * hardware maximum, and must be multiple of IGB_DBA_ALIGN. 

static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i, ntxd;

	/*
	 * Validate number of transmit descriptors.  It must not exceed
	 * the hardware maximum, and must be a multiple of IGB_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 ||
	    ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, ntxd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = ntxd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	if (txr->sc->hw.mac.type == e1000_82575)
		txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0;

	/*
	 * Initialize various watermarks
	 */
	txr->spare_desc = IGB_TX_SPARE;
	txr->intr_nsegs = txr->num_tx_desc / 16;
	txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS;
	txr->oact_hi_desc = txr->num_tx_desc / 2;
	txr->oact_lo_desc = txr->num_tx_desc / 8;
	if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX)
		txr->oact_lo_desc = IGB_TX_OACTIVE_MAX;
	if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED)
		txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED;

	return 0;
}
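
/*
 * On the watermarks initialized at the end of igb_create_tx_ring():
 * intr_nsegs (1/16 of the ring) presumably throttles how often a TX
 * completion interrupt is requested, while oact_hi_desc/oact_lo_desc
 * form the hysteresis for marking the subqueue "oactive" when free
 * descriptors run low and re-activating it once enough have been
 * reclaimed.
 */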
1946 static void 1947 igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc) 1948 { 1949 int i; 1950 1951 if (txr->txdma.dma_vaddr != NULL) { 1952 bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map); 1953 bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr, 1954 txr->txdma.dma_map); 1955 bus_dma_tag_destroy(txr->txdma.dma_tag); 1956 txr->txdma.dma_vaddr = NULL; 1957 } 1958 1959 if (txr->tx_hdr != NULL) { 1960 bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap); 1961 bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr, 1962 txr->tx_hdr_dmap); 1963 bus_dma_tag_destroy(txr->tx_hdr_dtag); 1964 txr->tx_hdr = NULL; 1965 } 1966 1967 if (txr->tx_buf == NULL) 1968 return; 1969 1970 for (i = 0; i < ndesc; ++i) { 1971 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1972 1973 KKASSERT(txbuf->m_head == NULL); 1974 bus_dmamap_destroy(txr->tx_tag, txbuf->map); 1975 } 1976 bus_dma_tag_destroy(txr->tx_tag); 1977 1978 kfree(txr->tx_buf, M_DEVBUF); 1979 txr->tx_buf = NULL; 1980 } 1981 1982 static void 1983 igb_init_tx_ring(struct igb_tx_ring *txr) 1984 { 1985 /* Clear the old descriptor contents */ 1986 bzero(txr->tx_base, 1987 sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc); 1988 1989 /* Clear TX head write-back buffer */ 1990 *(txr->tx_hdr) = 0; 1991 1992 /* Reset indices */ 1993 txr->next_avail_desc = 0; 1994 txr->next_to_clean = 0; 1995 txr->tx_nsegs = 0; 1996 1997 /* Set number of descriptors available */ 1998 txr->tx_avail = txr->num_tx_desc; 1999 2000 /* Enable this TX ring */ 2001 txr->tx_flags |= IGB_TXFLAG_ENABLED; 2002 } 2003 2004 static void 2005 igb_init_tx_unit(struct igb_softc *sc) 2006 { 2007 struct e1000_hw *hw = &sc->hw; 2008 uint32_t tctl; 2009 int i; 2010 2011 /* Setup the Tx Descriptor Rings */ 2012 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2013 struct igb_tx_ring *txr = &sc->tx_rings[i]; 2014 uint64_t bus_addr = txr->txdma.dma_paddr; 2015 uint64_t hdr_paddr = txr->tx_hdr_paddr; 2016 uint32_t txdctl = 0; 2017 uint32_t dca_txctrl; 2018 2019 E1000_WRITE_REG(hw, E1000_TDLEN(i), 2020 txr->num_tx_desc * sizeof(struct e1000_tx_desc)); 2021 E1000_WRITE_REG(hw, E1000_TDBAH(i), 2022 (uint32_t)(bus_addr >> 32)); 2023 E1000_WRITE_REG(hw, E1000_TDBAL(i), 2024 (uint32_t)bus_addr); 2025 2026 /* Setup the HW Tx Head and Tail descriptor pointers */ 2027 E1000_WRITE_REG(hw, E1000_TDT(i), 0); 2028 E1000_WRITE_REG(hw, E1000_TDH(i), 0); 2029 2030 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); 2031 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 2032 E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl); 2033 2034 /* 2035 * Don't set WB_on_EITR: 2036 * - 82575 does not have it 2037 * - It almost has no effect on 82576, see: 2038 * 82576 specification update errata #26 2039 * - It causes unnecessary bus traffic 2040 */ 2041 E1000_WRITE_REG(hw, E1000_TDWBAH(i), 2042 (uint32_t)(hdr_paddr >> 32)); 2043 E1000_WRITE_REG(hw, E1000_TDWBAL(i), 2044 ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE); 2045 2046 /* 2047 * WTHRESH is ignored by the hardware, since header 2048 * write back mode is used. 
2049 */ 2050 txdctl |= IGB_TX_PTHRESH; 2051 txdctl |= IGB_TX_HTHRESH << 8; 2052 txdctl |= IGB_TX_WTHRESH << 16; 2053 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 2054 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); 2055 } 2056 2057 if (sc->vf_ifp) 2058 return; 2059 2060 e1000_config_collision_dist(hw); 2061 2062 /* Program the Transmit Control Register */ 2063 tctl = E1000_READ_REG(hw, E1000_TCTL); 2064 tctl &= ~E1000_TCTL_CT; 2065 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2066 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); 2067 2068 /* This write will effectively turn on the transmit unit. */ 2069 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 2070 } 2071 2072 static boolean_t 2073 igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp) 2074 { 2075 struct e1000_adv_tx_context_desc *TXD; 2076 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 2077 int ehdrlen, ctxd, ip_hlen = 0; 2078 boolean_t offload = TRUE; 2079 2080 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0) 2081 offload = FALSE; 2082 2083 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 2084 2085 ctxd = txr->next_avail_desc; 2086 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 2087 2088 /* 2089 * In advanced descriptors the vlan tag must 2090 * be placed into the context descriptor, thus 2091 * we need to be here just for that setup. 2092 */ 2093 if (mp->m_flags & M_VLANTAG) { 2094 uint16_t vlantag; 2095 2096 vlantag = htole16(mp->m_pkthdr.ether_vlantag); 2097 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 2098 } else if (!offload) { 2099 return FALSE; 2100 } 2101 2102 ehdrlen = mp->m_pkthdr.csum_lhlen; 2103 KASSERT(ehdrlen > 0, ("invalid ether hlen")); 2104 2105 /* Set the ether header length */ 2106 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; 2107 if (mp->m_pkthdr.csum_flags & CSUM_IP) { 2108 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 2109 ip_hlen = mp->m_pkthdr.csum_iphlen; 2110 KASSERT(ip_hlen > 0, ("invalid ip hlen")); 2111 } 2112 vlan_macip_lens |= ip_hlen; 2113 2114 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 2115 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 2116 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 2117 else if (mp->m_pkthdr.csum_flags & CSUM_UDP) 2118 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; 2119 2120 /* 2121 * 82575 needs the TX context index added; the queue 2122 * index is used as TX context index here. 
 */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return offload;
}

static void
igb_txeof(struct igb_tx_ring *txr)
{
	int first, hdr, avail;

	if (txr->tx_avail == txr->num_tx_desc)
		return;

	first = txr->next_to_clean;
	hdr = *(txr->tx_hdr);

	if (first == hdr)
		return;

	avail = txr->tx_avail;
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		++avail;
		if (txbuf->m_head) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		if (++first == txr->num_tx_desc)
			first = 0;
	}
	txr->next_to_clean = first;
	txr->tx_avail = avail;

	/*
	 * If we have a minimum free, clear OACTIVE
	 * to tell the stack that it is OK to send packets.
	 */
	if (IGB_IS_NOT_OACTIVE(txr)) {
		ifsq_clr_oactive(txr->ifsq);

		/*
		 * We have enough TX descriptors, turn off
		 * the watchdog.  We allow a small number of
		 * packets (roughly intr_nsegs) to remain
		 * pending on the transmit ring.
		 */
		txr->tx_watchdog.wd_timer = 0;
	}
}
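/*
 * RX ring creation mirrors the TX path: descriptor memory must be
 * IGB_DBA_ALIGN aligned, and each RX buffer gets a private DMA map
 * plus one shared spare map that igb_newbuf() swaps in on refill.
 */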
static int
igb_create_rx_ring(struct igb_rx_ring *rxr)
{
	int rsize, i, error, nrxd;

	/*
	 * Validate the number of receive descriptors.  It must not
	 * exceed the hardware maximum, and must be a multiple of
	 * IGB_DBA_ALIGN.
	 */
	nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd);
	if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 ||
	    nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, nrxd);
		rxr->num_rx_desc = IGB_DEFAULT_RXD;
	} else {
		rxr->num_rx_desc = nrxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RX descriptor memory\n");
		return ENOMEM;
	}
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_rx_buf) * rxr->num_rx_desc);
	rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES,		/* maxsize */
	    1,			/* nsegments */
	    MCLBYTES,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA map\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA maps\n");
			igb_destroy_rx_ring(rxr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS;

	return 0;
}

static void
igb_free_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}

	if (rxr->fmp != NULL)
		m_freem(rxr->fmp);
	rxr->fmp = NULL;
	rxr->lmp = NULL;
}

static void
igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rxdma.dma_vaddr != NULL) {
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
		bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
		    rxr->rxdma.dma_map);
		bus_dma_tag_destroy(rxr->rxdma.dma_tag);
		rxr->rxdma.dma_vaddr = NULL;
	}

	if (rxr->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc;
++i) { 2327 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2328 2329 KKASSERT(rxbuf->m_head == NULL); 2330 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map); 2331 } 2332 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap); 2333 bus_dma_tag_destroy(rxr->rx_tag); 2334 2335 kfree(rxr->rx_buf, M_DEVBUF); 2336 rxr->rx_buf = NULL; 2337 } 2338 2339 static void 2340 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf) 2341 { 2342 rxd->read.pkt_addr = htole64(rxbuf->paddr); 2343 rxd->wb.upper.status_error = 0; 2344 } 2345 2346 static int 2347 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait) 2348 { 2349 struct mbuf *m; 2350 bus_dma_segment_t seg; 2351 bus_dmamap_t map; 2352 struct igb_rx_buf *rxbuf; 2353 int error, nseg; 2354 2355 m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2356 if (m == NULL) { 2357 if (wait) { 2358 if_printf(&rxr->sc->arpcom.ac_if, 2359 "Unable to allocate RX mbuf\n"); 2360 } 2361 return ENOBUFS; 2362 } 2363 m->m_len = m->m_pkthdr.len = MCLBYTES; 2364 2365 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2366 m_adj(m, ETHER_ALIGN); 2367 2368 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2369 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2370 if (error) { 2371 m_freem(m); 2372 if (wait) { 2373 if_printf(&rxr->sc->arpcom.ac_if, 2374 "Unable to load RX mbuf\n"); 2375 } 2376 return error; 2377 } 2378 2379 rxbuf = &rxr->rx_buf[i]; 2380 if (rxbuf->m_head != NULL) 2381 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2382 2383 map = rxbuf->map; 2384 rxbuf->map = rxr->rx_sparemap; 2385 rxr->rx_sparemap = map; 2386 2387 rxbuf->m_head = m; 2388 rxbuf->paddr = seg.ds_addr; 2389 2390 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2391 return 0; 2392 } 2393 2394 static int 2395 igb_init_rx_ring(struct igb_rx_ring *rxr) 2396 { 2397 int i; 2398 2399 /* Clear the ring contents */ 2400 bzero(rxr->rx_base, 2401 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2402 2403 /* Now replenish the ring mbufs */ 2404 for (i = 0; i < rxr->num_rx_desc; ++i) { 2405 int error; 2406 2407 error = igb_newbuf(rxr, i, TRUE); 2408 if (error) 2409 return error; 2410 } 2411 2412 /* Setup our descriptor indices */ 2413 rxr->next_to_check = 0; 2414 2415 rxr->fmp = NULL; 2416 rxr->lmp = NULL; 2417 rxr->discard = FALSE; 2418 2419 return 0; 2420 } 2421 2422 static void 2423 igb_init_rx_unit(struct igb_softc *sc) 2424 { 2425 struct ifnet *ifp = &sc->arpcom.ac_if; 2426 struct e1000_hw *hw = &sc->hw; 2427 uint32_t rctl, rxcsum, srrctl = 0; 2428 int i; 2429 2430 /* 2431 * Make sure receives are disabled while setting 2432 * up the descriptor ring 2433 */ 2434 rctl = E1000_READ_REG(hw, E1000_RCTL); 2435 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2436 2437 #if 0 2438 /* 2439 ** Set up for header split 2440 */ 2441 if (igb_header_split) { 2442 /* Use a standard mbuf for the header */ 2443 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2444 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2445 } else 2446 #endif 2447 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2448 2449 /* 2450 ** Set up for jumbo frames 2451 */ 2452 if (ifp->if_mtu > ETHERMTU) { 2453 rctl |= E1000_RCTL_LPE; 2454 #if 0 2455 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { 2456 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2457 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; 2458 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { 2459 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2460 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; 2461 } 2462 /* Set maximum packet len */ 2463 psize = 
adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
#else
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
#endif
	} else {
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
	}

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		uint64_t bus_addr = rxr->rxdma.dma_paddr;
		uint32_t rxdctl;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		/*
		 * Don't set WTHRESH to a value above 1 on 82576, see:
		 * 82576 specification update errata #26
		 */
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
	rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);

	/*
	 * Receive Checksum Offload for TCP and UDP
	 *
	 * Checksum offloading is also enabled if multiple receive
	 * queues are to be supported, since we need it to figure
	 * out fragments.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD;
	} else {
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD);
	}
	E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);

	if (IGB_ENABLE_HWRSS(sc)) {
		uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
		uint32_t reta_shift;
		int j, r;

		/*
		 * NOTE:
		 * When we reach here, RSS has already been disabled
		 * in igb_stop(), so we can safely configure the RSS
		 * key and redirect table.
		 */

		/*
		 * Configure RSS key
		 */
		toeplitz_get_key(key, sizeof(key));
		for (i = 0; i < IGB_NRSSRK; ++i) {
			uint32_t rssrk;

			rssrk = IGB_RSSRK_VAL(key, i);
			IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);

			E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
		}

		/*
		 * Configure the RSS redirect table in the following fashion:
		 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
		 */
		reta_shift = IGB_RETA_SHIFT;
		if (hw->mac.type == e1000_82575)
			reta_shift = IGB_RETA_SHIFT_82575;

		r = 0;
		for (j = 0; j < IGB_NRETA; ++j) {
			uint32_t reta = 0;

			for (i = 0; i < IGB_RETA_SIZE; ++i) {
				uint32_t q;

				q = (r % sc->rx_ring_inuse) << reta_shift;
				reta |= q << (8 * i);
				++r;
			}
			IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
			E1000_WRITE_REG(hw, E1000_RETA(j), reta);
		}

		/*
		 * Enable multiple receive queues.
		 * Enable IPv4 RSS standard hash functions.
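		 * Leave UDP hash fields disabled; UDP datagrams are
		 * then hashed on the IPv4 addresses only.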
2580 * Disable RSS interrupt on 82575 2581 */ 2582 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2583 E1000_MRQC_ENABLE_RSS_4Q | 2584 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2585 E1000_MRQC_RSS_FIELD_IPV4); 2586 } 2587 2588 /* Setup the Receive Control Register */ 2589 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2590 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2591 E1000_RCTL_RDMTS_HALF | 2592 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2593 /* Strip CRC bytes. */ 2594 rctl |= E1000_RCTL_SECRC; 2595 /* Make sure VLAN Filters are off */ 2596 rctl &= ~E1000_RCTL_VFE; 2597 /* Don't store bad packets */ 2598 rctl &= ~E1000_RCTL_SBP; 2599 2600 /* Enable Receives */ 2601 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2602 2603 /* 2604 * Setup the HW Rx Head and Tail Descriptor Pointers 2605 * - needs to be after enable 2606 */ 2607 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2608 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2609 2610 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2611 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1); 2612 } 2613 } 2614 2615 static void 2616 igb_rx_refresh(struct igb_rx_ring *rxr, int i) 2617 { 2618 if (--i < 0) 2619 i = rxr->num_rx_desc - 1; 2620 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2621 } 2622 2623 static void 2624 igb_rxeof(struct igb_rx_ring *rxr, int count) 2625 { 2626 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2627 union e1000_adv_rx_desc *cur; 2628 uint32_t staterr; 2629 int i, ncoll = 0, cpuid = mycpuid; 2630 2631 i = rxr->next_to_check; 2632 cur = &rxr->rx_base[i]; 2633 staterr = le32toh(cur->wb.upper.status_error); 2634 2635 if ((staterr & E1000_RXD_STAT_DD) == 0) 2636 return; 2637 2638 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2639 struct pktinfo *pi = NULL, pi0; 2640 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2641 struct mbuf *m = NULL; 2642 boolean_t eop; 2643 2644 eop = (staterr & E1000_RXD_STAT_EOP) ? 
TRUE : FALSE; 2645 if (eop) 2646 --count; 2647 2648 ++ncoll; 2649 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2650 !rxr->discard) { 2651 struct mbuf *mp = rxbuf->m_head; 2652 uint32_t hash, hashtype; 2653 uint16_t vlan; 2654 int len; 2655 2656 len = le16toh(cur->wb.upper.length); 2657 if ((rxr->sc->hw.mac.type == e1000_i350 || 2658 rxr->sc->hw.mac.type == e1000_i354) && 2659 (staterr & E1000_RXDEXT_STATERR_LB)) 2660 vlan = be16toh(cur->wb.upper.vlan); 2661 else 2662 vlan = le16toh(cur->wb.upper.vlan); 2663 2664 hash = le32toh(cur->wb.lower.hi_dword.rss); 2665 hashtype = le32toh(cur->wb.lower.lo_dword.data) & 2666 E1000_RXDADV_RSSTYPE_MASK; 2667 2668 IGB_RSS_DPRINTF(rxr->sc, 10, 2669 "ring%d, hash 0x%08x, hashtype %u\n", 2670 rxr->me, hash, hashtype); 2671 2672 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2673 BUS_DMASYNC_POSTREAD); 2674 2675 if (igb_newbuf(rxr, i, FALSE) != 0) { 2676 IFNET_STAT_INC(ifp, iqdrops, 1); 2677 goto discard; 2678 } 2679 2680 mp->m_len = len; 2681 if (rxr->fmp == NULL) { 2682 mp->m_pkthdr.len = len; 2683 rxr->fmp = mp; 2684 rxr->lmp = mp; 2685 } else { 2686 rxr->lmp->m_next = mp; 2687 rxr->lmp = rxr->lmp->m_next; 2688 rxr->fmp->m_pkthdr.len += len; 2689 } 2690 2691 if (eop) { 2692 m = rxr->fmp; 2693 rxr->fmp = NULL; 2694 rxr->lmp = NULL; 2695 2696 m->m_pkthdr.rcvif = ifp; 2697 IFNET_STAT_INC(ifp, ipackets, 1); 2698 2699 if (ifp->if_capenable & IFCAP_RXCSUM) 2700 igb_rxcsum(staterr, m); 2701 2702 if (staterr & E1000_RXD_STAT_VP) { 2703 m->m_pkthdr.ether_vlantag = vlan; 2704 m->m_flags |= M_VLANTAG; 2705 } 2706 2707 if (ifp->if_capenable & IFCAP_RSS) { 2708 pi = igb_rssinfo(m, &pi0, 2709 hash, hashtype, staterr); 2710 } 2711 #ifdef IGB_RSS_DEBUG 2712 rxr->rx_packets++; 2713 #endif 2714 } 2715 } else { 2716 IFNET_STAT_INC(ifp, ierrors, 1); 2717 discard: 2718 igb_setup_rxdesc(cur, rxbuf); 2719 if (!eop) 2720 rxr->discard = TRUE; 2721 else 2722 rxr->discard = FALSE; 2723 if (rxr->fmp != NULL) { 2724 m_freem(rxr->fmp); 2725 rxr->fmp = NULL; 2726 rxr->lmp = NULL; 2727 } 2728 m = NULL; 2729 } 2730 2731 if (m != NULL) 2732 ifp->if_input(ifp, m, pi, cpuid); 2733 2734 /* Advance our pointers to the next descriptor. 
*/ 2735 if (++i == rxr->num_rx_desc) 2736 i = 0; 2737 2738 if (ncoll >= rxr->wreg_nsegs) { 2739 igb_rx_refresh(rxr, i); 2740 ncoll = 0; 2741 } 2742 2743 cur = &rxr->rx_base[i]; 2744 staterr = le32toh(cur->wb.upper.status_error); 2745 } 2746 rxr->next_to_check = i; 2747 2748 if (ncoll > 0) 2749 igb_rx_refresh(rxr, i); 2750 } 2751 2752 2753 static void 2754 igb_set_vlan(struct igb_softc *sc) 2755 { 2756 struct e1000_hw *hw = &sc->hw; 2757 uint32_t reg; 2758 #if 0 2759 struct ifnet *ifp = sc->arpcom.ac_if; 2760 #endif 2761 2762 if (sc->vf_ifp) { 2763 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2764 return; 2765 } 2766 2767 reg = E1000_READ_REG(hw, E1000_CTRL); 2768 reg |= E1000_CTRL_VME; 2769 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2770 2771 #if 0 2772 /* Enable the Filter Table */ 2773 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2774 reg = E1000_READ_REG(hw, E1000_RCTL); 2775 reg &= ~E1000_RCTL_CFIEN; 2776 reg |= E1000_RCTL_VFE; 2777 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2778 } 2779 #endif 2780 2781 /* Update the frame size */ 2782 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2783 sc->max_frame_size + VLAN_TAG_SIZE); 2784 2785 #if 0 2786 /* Don't bother with table if no vlans */ 2787 if ((adapter->num_vlans == 0) || 2788 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2789 return; 2790 /* 2791 ** A soft reset zero's out the VFTA, so 2792 ** we need to repopulate it now. 2793 */ 2794 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2795 if (adapter->shadow_vfta[i] != 0) { 2796 if (adapter->vf_ifp) 2797 e1000_vfta_set_vf(hw, 2798 adapter->shadow_vfta[i], TRUE); 2799 else 2800 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2801 i, adapter->shadow_vfta[i]); 2802 } 2803 #endif 2804 } 2805 2806 static void 2807 igb_enable_intr(struct igb_softc *sc) 2808 { 2809 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 2810 lwkt_serialize_handler_enable(&sc->main_serialize); 2811 } else { 2812 int i; 2813 2814 for (i = 0; i < sc->msix_cnt; ++i) { 2815 lwkt_serialize_handler_enable( 2816 sc->msix_data[i].msix_serialize); 2817 } 2818 } 2819 2820 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2821 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 2822 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask); 2823 else 2824 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2825 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2826 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2827 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2828 } else { 2829 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2830 } 2831 E1000_WRITE_FLUSH(&sc->hw); 2832 } 2833 2834 static void 2835 igb_disable_intr(struct igb_softc *sc) 2836 { 2837 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2838 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2839 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2840 } 2841 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2842 E1000_WRITE_FLUSH(&sc->hw); 2843 2844 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 2845 lwkt_serialize_handler_disable(&sc->main_serialize); 2846 } else { 2847 int i; 2848 2849 for (i = 0; i < sc->msix_cnt; ++i) { 2850 lwkt_serialize_handler_disable( 2851 sc->msix_data[i].msix_serialize); 2852 } 2853 } 2854 } 2855 2856 /* 2857 * Bit of a misnomer, what this really means is 2858 * to enable OS management of the system... 
aka
 * to disable special hardware management features.
 */
static void
igb_get_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~E1000_MANC_ARP_EN;

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5; /* Mng Port 623 */
		manc2h |= 1 << 6; /* Mng Port 664 */
		E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to the hardware management controller
 * if there is one.
 */
static void
igb_rel_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* Re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * Sets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void
igb_get_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * Resets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_rel_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware take over control of the hardware */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

static boolean_t
igb_is_valid_ether_addr(const uint8_t *addr)
{
	uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return FALSE;
	return TRUE;
}

/*
 * Enable PCI Wake-On-LAN capability
 */
static void
igb_enable_wol(device_t dev)
{
	uint16_t cap, status;
	uint8_t id;

	/* First find the capabilities pointer */
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);

	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)	/* Something wrong */
		return;

	/*
	 * OK, we have the power capabilities,
	 * so now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
}

static void
igb_update_stats_counters(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_hw_stats *stats;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * The virtual function adapter has only a small
	 * controlled set of stats; update only those and
	 * return.
2986 */ 2987 if (sc->vf_ifp) { 2988 igb_update_vf_stats_counters(sc); 2989 return; 2990 } 2991 stats = sc->stats; 2992 2993 if (sc->hw.phy.media_type == e1000_media_type_copper || 2994 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 2995 stats->symerrs += 2996 E1000_READ_REG(hw,E1000_SYMERRS); 2997 stats->sec += E1000_READ_REG(hw, E1000_SEC); 2998 } 2999 3000 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 3001 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 3002 stats->scc += E1000_READ_REG(hw, E1000_SCC); 3003 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 3004 3005 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 3006 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 3007 stats->colc += E1000_READ_REG(hw, E1000_COLC); 3008 stats->dc += E1000_READ_REG(hw, E1000_DC); 3009 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 3010 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 3011 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 3012 3013 /* 3014 * For watchdog management we need to know if we have been 3015 * paused during the last interval, so capture that here. 3016 */ 3017 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 3018 stats->xoffrxc += sc->pause_frames; 3019 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 3020 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 3021 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 3022 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 3023 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 3024 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 3025 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 3026 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 3027 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 3028 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 3029 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 3030 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 3031 3032 /* For the 64-bit byte counters the low dword must be read first. 
*/ 3033 /* Both registers clear on the read of the high dword */ 3034 3035 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 3036 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 3037 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 3038 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 3039 3040 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 3041 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 3042 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 3043 stats->roc += E1000_READ_REG(hw, E1000_ROC); 3044 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 3045 3046 stats->tor += E1000_READ_REG(hw, E1000_TORH); 3047 stats->tot += E1000_READ_REG(hw, E1000_TOTH); 3048 3049 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 3050 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 3051 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 3052 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 3053 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 3054 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 3055 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 3056 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 3057 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 3058 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 3059 3060 /* Interrupt Counts */ 3061 3062 stats->iac += E1000_READ_REG(hw, E1000_IAC); 3063 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 3064 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 3065 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 3066 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 3067 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 3068 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 3069 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 3070 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 3071 3072 /* Host to Card Statistics */ 3073 3074 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 3075 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 3076 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 3077 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 3078 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 3079 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 3080 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 3081 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 3082 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 3083 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 3084 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 3085 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 3086 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 3087 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 3088 3089 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 3090 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 3091 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 3092 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 3093 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 3094 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 3095 3096 IFNET_STAT_SET(ifp, collisions, stats->colc); 3097 3098 /* Rx Errors */ 3099 IFNET_STAT_SET(ifp, ierrors, 3100 stats->rxerrc + stats->crcerrs + stats->algnerrc + 3101 stats->ruc + stats->roc + stats->mpc + stats->cexterr); 3102 3103 /* Tx Errors */ 3104 IFNET_STAT_SET(ifp, oerrors, 3105 stats->ecol + stats->latecol + sc->watchdog_events); 3106 3107 /* Driver specific counters */ 3108 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 3109 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 3110 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 3111 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 3112 
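	/*
	 * PBA describes the on-chip packet buffer split: the high
	 * word is the TX allocation, the low word the RX allocation.
	 */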
sc->packet_buf_alloc_tx = 3113 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); 3114 sc->packet_buf_alloc_rx = 3115 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 3116 } 3117 3118 static void 3119 igb_vf_init_stats(struct igb_softc *sc) 3120 { 3121 struct e1000_hw *hw = &sc->hw; 3122 struct e1000_vf_stats *stats; 3123 3124 stats = sc->stats; 3125 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 3126 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 3127 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 3128 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 3129 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 3130 } 3131 3132 static void 3133 igb_update_vf_stats_counters(struct igb_softc *sc) 3134 { 3135 struct e1000_hw *hw = &sc->hw; 3136 struct e1000_vf_stats *stats; 3137 3138 if (sc->link_speed == 0) 3139 return; 3140 3141 stats = sc->stats; 3142 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 3143 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 3144 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 3145 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 3146 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 3147 } 3148 3149 #ifdef IFPOLL_ENABLE 3150 3151 static void 3152 igb_npoll_status(struct ifnet *ifp) 3153 { 3154 struct igb_softc *sc = ifp->if_softc; 3155 uint32_t reg_icr; 3156 3157 ASSERT_SERIALIZED(&sc->main_serialize); 3158 3159 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3160 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3161 sc->hw.mac.get_link_status = 1; 3162 igb_update_link_status(sc); 3163 } 3164 } 3165 3166 static void 3167 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 3168 { 3169 struct igb_tx_ring *txr = arg; 3170 3171 ASSERT_SERIALIZED(&txr->tx_serialize); 3172 3173 igb_txeof(txr); 3174 if (!ifsq_is_empty(txr->ifsq)) 3175 ifsq_devstart(txr->ifsq); 3176 } 3177 3178 static void 3179 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3180 { 3181 struct igb_rx_ring *rxr = arg; 3182 3183 ASSERT_SERIALIZED(&rxr->rx_serialize); 3184 3185 igb_rxeof(rxr, cycle); 3186 } 3187 3188 static void 3189 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3190 { 3191 struct igb_softc *sc = ifp->if_softc; 3192 int i, txr_cnt, rxr_cnt; 3193 3194 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3195 3196 if (info) { 3197 int off; 3198 3199 info->ifpi_status.status_func = igb_npoll_status; 3200 info->ifpi_status.serializer = &sc->main_serialize; 3201 3202 txr_cnt = igb_get_txring_inuse(sc, TRUE); 3203 off = sc->tx_npoll_off; 3204 for (i = 0; i < txr_cnt; ++i) { 3205 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3206 int idx = i + off; 3207 3208 KKASSERT(idx < ncpus2); 3209 info->ifpi_tx[idx].poll_func = igb_npoll_tx; 3210 info->ifpi_tx[idx].arg = txr; 3211 info->ifpi_tx[idx].serializer = &txr->tx_serialize; 3212 ifsq_set_cpuid(txr->ifsq, idx); 3213 } 3214 3215 rxr_cnt = igb_get_rxring_inuse(sc, TRUE); 3216 off = sc->rx_npoll_off; 3217 for (i = 0; i < rxr_cnt; ++i) { 3218 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3219 int idx = i + off; 3220 3221 KKASSERT(idx < ncpus2); 3222 info->ifpi_rx[idx].poll_func = igb_npoll_rx; 3223 info->ifpi_rx[idx].arg = rxr; 3224 info->ifpi_rx[idx].serializer = &rxr->rx_serialize; 3225 } 3226 3227 if (ifp->if_flags & IFF_RUNNING) { 3228 if (rxr_cnt == sc->rx_ring_inuse && 3229 txr_cnt == sc->tx_ring_inuse) { 3230 igb_set_timer_cpuid(sc, TRUE); 3231 igb_disable_intr(sc); 3232 } else { 3233 igb_init(sc); 3234 } 3235 } 3236 } else { 3237 for (i = 0; i < sc->tx_ring_cnt; 
++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid);
		}

		if (ifp->if_flags & IFF_RUNNING) {
			txr_cnt = igb_get_txring_inuse(sc, FALSE);
			rxr_cnt = igb_get_rxring_inuse(sc, FALSE);

			if (rxr_cnt == sc->rx_ring_inuse &&
			    txr_cnt == sc->tx_ring_inuse) {
				igb_set_timer_cpuid(sc, FALSE);
				igb_enable_intr(sc);
			} else {
				igb_init(sc);
			}
		}
	}
}

#endif /* IFPOLL_ENABLE */

static void
igb_intr(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t eicr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	eicr = E1000_READ_REG(&sc->hw, E1000_EICR);

	if (eicr == 0)
		return;

	if (ifp->if_flags & IFF_RUNNING) {
		struct igb_tx_ring *txr = &sc->tx_rings[0];
		int i;

		for (i = 0; i < sc->rx_ring_inuse; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			if (eicr & rxr->rx_intr_mask) {
				lwkt_serialize_enter(&rxr->rx_serialize);
				igb_rxeof(rxr, -1);
				lwkt_serialize_exit(&rxr->rx_serialize);
			}
		}

		if (eicr & txr->tx_intr_mask) {
			lwkt_serialize_enter(&txr->tx_serialize);
			igb_txeof(txr);
			if (!ifsq_is_empty(txr->ifsq))
				ifsq_devstart(txr->ifsq);
			lwkt_serialize_exit(&txr->tx_serialize);
		}
	}

	if (eicr & E1000_EICR_OTHER) {
		uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);

		/* Link status change */
		if (icr & E1000_ICR_LSC) {
			sc->hw.mac.get_link_status = 1;
			igb_update_link_status(sc);
		}
	}

	/*
	 * Reading EICR has the side effect of clearing the interrupt
	 * mask, so all interrupts need to be re-enabled here.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
}
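/*
 * Shared legacy/MSI interrupt handler.  The cause is taken from ICR,
 * which clears its asserted bits on read, so each cause is handled
 * at most once per interrupt.
 */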
*/ 3330 if (reg_icr == 0x0) 3331 return; 3332 3333 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) 3334 return; 3335 3336 if (ifp->if_flags & IFF_RUNNING) { 3337 if (reg_icr & 3338 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 3339 int i; 3340 3341 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3342 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3343 3344 lwkt_serialize_enter(&rxr->rx_serialize); 3345 igb_rxeof(rxr, -1); 3346 lwkt_serialize_exit(&rxr->rx_serialize); 3347 } 3348 } 3349 3350 if (reg_icr & E1000_ICR_TXDW) { 3351 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3352 3353 lwkt_serialize_enter(&txr->tx_serialize); 3354 igb_txeof(txr); 3355 if (!ifsq_is_empty(txr->ifsq)) 3356 ifsq_devstart(txr->ifsq); 3357 lwkt_serialize_exit(&txr->tx_serialize); 3358 } 3359 } 3360 3361 /* Link status change */ 3362 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3363 sc->hw.mac.get_link_status = 1; 3364 igb_update_link_status(sc); 3365 } 3366 3367 if (reg_icr & E1000_ICR_RXO) 3368 sc->rx_overruns++; 3369 } 3370 3371 static int 3372 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp, 3373 int *segs_used, int *idx) 3374 { 3375 bus_dma_segment_t segs[IGB_MAX_SCATTER]; 3376 bus_dmamap_t map; 3377 struct igb_tx_buf *tx_buf, *tx_buf_mapped; 3378 union e1000_adv_tx_desc *txd = NULL; 3379 struct mbuf *m_head = *m_headp; 3380 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0; 3381 int maxsegs, nsegs, i, j, error; 3382 uint32_t hdrlen = 0; 3383 3384 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3385 error = igb_tso_pullup(txr, m_headp); 3386 if (error) 3387 return error; 3388 m_head = *m_headp; 3389 } 3390 3391 /* Set basic descriptor constants */ 3392 cmd_type_len |= E1000_ADVTXD_DTYP_DATA; 3393 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; 3394 if (m_head->m_flags & M_VLANTAG) 3395 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 3396 3397 /* 3398 * Map the packet for DMA. 3399 */ 3400 tx_buf = &txr->tx_buf[txr->next_avail_desc]; 3401 tx_buf_mapped = tx_buf; 3402 map = tx_buf->map; 3403 3404 maxsegs = txr->tx_avail - IGB_TX_RESERVED; 3405 KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n")); 3406 if (maxsegs > IGB_MAX_SCATTER) 3407 maxsegs = IGB_MAX_SCATTER; 3408 3409 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp, 3410 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3411 if (error) { 3412 if (error == ENOBUFS) 3413 txr->sc->mbuf_defrag_failed++; 3414 else 3415 txr->sc->no_tx_dma_setup++; 3416 3417 m_freem(*m_headp); 3418 *m_headp = NULL; 3419 return error; 3420 } 3421 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE); 3422 3423 m_head = *m_headp; 3424 3425 /* 3426 * Set up the TX context descriptor, if any hardware offloading is 3427 * needed. This includes CSUM, VLAN, and TSO. It will consume one 3428 * TX descriptor. 3429 * 3430 * Unlike these chips' predecessors (em/emx), TX context descriptor 3431 * will _not_ interfere TX data fetching pipelining. 
 */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		igb_tso_ctx(txr, m_head, &hdrlen);
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
		olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		txr->tx_nsegs++;
		(*segs_used)++;
	} else if (igb_txcsum_ctx(txr, m_head)) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
		if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
			olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
		txr->tx_nsegs++;
		(*segs_used)++;
	}

	*segs_used += nsegs;
	txr->tx_nsegs += nsegs;
	if (txr->tx_nsegs >= txr->intr_nsegs) {
		/*
		 * Report Status (RS) is turned on every intr_nsegs
		 * descriptors (roughly).
		 */
		txr->tx_nsegs = 0;
		cmd_rs = E1000_ADVTXD_DCMD_RS;
	}

	/* Calculate payload length */
	olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
	    << E1000_ADVTXD_PAYLEN_SHIFT);

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as the TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	/* Set up our transmit descriptors */
	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;

		tx_buf = &txr->tx_buf[i];
		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seg_addr = segs[j].ds_addr;
		seg_len = segs[j].ds_len;

		txd->read.buffer_addr = htole64(seg_addr);
		txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
		txd->read.olinfo_status = htole32(olinfo_status);
		if (++i == txr->num_tx_desc)
			i = 0;
		tx_buf->m_head = NULL;
	}

	KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
	txr->next_avail_desc = i;
	txr->tx_avail -= nsegs;

	tx_buf->m_head = m_head;
	tx_buf_mapped->map = tx_buf->map;
	tx_buf->map = map;

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);

	/*
	 * Defer TDT updating until enough descriptors are set up
	 */
	*idx = i;
#ifdef IGB_TSS_DEBUG
	++txr->tx_packets;
#endif

	return 0;
}

static void
igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct igb_softc *sc = ifp->if_softc;
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	if (!IGB_IS_NOT_OACTIVE(txr))
		igb_txeof(txr);

	while (!ifsq_is_empty(ifsq)) {
		if (IGB_IS_OACTIVE(txr)) {
			ifsq_set_oactive(ifsq);
			/* Set watchdog on */
			txr->tx_watchdog.wd_timer = 5;
			break;
		}

		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (igb_encap(txr, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so
		 * increasing opackets at TX interrupt time would
		 * make the opackets statistic vastly inaccurate;
		 * we do the opackets increment now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= txr->wreg_nsegs) {
			E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
			idx = -1;
			nsegs = 0;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	if (idx >= 0)
		E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
}
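/*
 * NOTE:
 * igb_start() above batches TDT updates: the tail register is only
 * written once wreg_nsegs descriptors have been queued, and is
 * flushed once more after the loop, keeping per-packet register
 * writes to a minimum.
 */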
static void
igb_watchdog(struct ifaltq_subque *ifsq)
{
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct igb_softc *sc = ifp->if_softc;
	int i;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If flow control has paused us since we last checked,
	 * it invalidates the watchdog timing, so don't run it.
	 */
	if (sc->pause_frames) {
		sc->pause_frames = 0;
		txr->tx_watchdog.wd_timer = 5;
		return;
	}

	if_printf(ifp, "Watchdog timeout -- resetting\n");
	if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
	    E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
	if_printf(ifp, "TX(%d) desc avail = %d, "
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);

	IFNET_STAT_INC(ifp, oerrors, 1);
	sc->watchdog_events++;

	igb_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);
}

static void
igb_set_eitr(struct igb_softc *sc, int idx, int rate)
{
	uint32_t eitr = 0;

	if (rate > 0) {
		if (sc->hw.mac.type == e1000_82575) {
			eitr = 1000000000 / 256 / rate;
			/*
			 * NOTE:
			 * The datasheet is wrong about the 2-bit
			 * left shift.
			 */
		} else {
			eitr = 1000000 / rate;
			eitr <<= IGB_EITR_INTVL_SHIFT;
		}

		if (eitr == 0) {
			/* Don't disable it */
			eitr = 1 << IGB_EITR_INTVL_SHIFT;
		} else if (eitr > IGB_EITR_INTVL_MASK) {
			/* Don't allow it to be too large */
			eitr = IGB_EITR_INTVL_MASK;
		}
	}
	if (sc->hw.mac.type == e1000_82575)
		eitr |= eitr << 16;
	else
		eitr |= E1000_EITR_CNT_IGNR;
	E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr);
}

static int
igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, intr_rate;

	intr_rate = sc->intr_rate;
	error = sysctl_handle_int(oidp, &intr_rate, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (intr_rate < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	sc->intr_rate = intr_rate;
	if (ifp->if_flags & IFF_RUNNING)
		igb_set_eitr(sc, 0, sc->intr_rate);

	if (bootverbose)
		if_printf(ifp, "interrupt rate set to %d/sec\n", sc->intr_rate);

	ifnet_deserialize_all(ifp);

	return 0;
}
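/*
 * Unlike the legacy/MSI interrupt rate above, each MSI-X vector
 * carries its own rate, so the handler below only needs to hold that
 * vector's serializer rather than serializing the whole interface.
 */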
static int
igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS)
{
	struct igb_msix_data *msix = (void *)arg1;
	struct igb_softc *sc = msix->msix_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, msix_rate;

	msix_rate = msix->msix_rate;
	error = sysctl_handle_int(oidp, &msix_rate, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (msix_rate < 0)
		return EINVAL;

	lwkt_serialize_enter(msix->msix_serialize);

	msix->msix_rate = msix_rate;
	if (ifp->if_flags & IFF_RUNNING)
		igb_set_eitr(sc, msix->msix_vector, msix->msix_rate);

	if (bootverbose) {
		if_printf(ifp, "%s set to %d/sec\n", msix->msix_rate_desc,
		    msix->msix_rate);
	}

	lwkt_serialize_exit(msix->msix_serialize);

	return 0;
}

static int
igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct igb_tx_ring *txr = &sc->tx_rings[0];
	int error, nsegs;

	nsegs = txr->intr_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (nsegs <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc ||
	    nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) {
		error = EINVAL;
	} else {
		int i;

		error = 0;
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			sc->tx_rings[i].intr_nsegs = nsegs;
	}

	ifnet_deserialize_all(ifp);

	return error;
}

static int
igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->rx_rings[0].wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		sc->rx_rings[i].wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

static int
igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->tx_rings[0].wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_rings[i].wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

#ifdef IFPOLL_ENABLE

static int
igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->rx_npoll_off;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->rx_npoll_off = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

static int
igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->tx_npoll_off;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->tx_npoll_off = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif /* IFPOLL_ENABLE */

static void
igb_init_intr(struct igb_softc *sc)
{
	igb_set_intr_mask(sc);

	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0)
igb_init_unshared_intr(sc); 3838 3839 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 3840 igb_set_eitr(sc, 0, sc->intr_rate); 3841 } else { 3842 int i; 3843 3844 for (i = 0; i < sc->msix_cnt; ++i) 3845 igb_set_eitr(sc, i, sc->msix_data[i].msix_rate); 3846 } 3847 } 3848 3849 static void 3850 igb_init_unshared_intr(struct igb_softc *sc) 3851 { 3852 struct e1000_hw *hw = &sc->hw; 3853 const struct igb_rx_ring *rxr; 3854 const struct igb_tx_ring *txr; 3855 uint32_t ivar, index; 3856 int i; 3857 3858 /* 3859 * Enable extended mode 3860 */ 3861 if (sc->hw.mac.type != e1000_82575) { 3862 uint32_t gpie; 3863 int ivar_max; 3864 3865 gpie = E1000_GPIE_NSICR; 3866 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3867 gpie |= E1000_GPIE_MSIX_MODE | 3868 E1000_GPIE_EIAME | 3869 E1000_GPIE_PBA; 3870 } 3871 E1000_WRITE_REG(hw, E1000_GPIE, gpie); 3872 3873 /* 3874 * Clear IVARs 3875 */ 3876 switch (sc->hw.mac.type) { 3877 case e1000_82576: 3878 ivar_max = IGB_MAX_IVAR_82576; 3879 break; 3880 3881 case e1000_82580: 3882 ivar_max = IGB_MAX_IVAR_82580; 3883 break; 3884 3885 case e1000_i350: 3886 ivar_max = IGB_MAX_IVAR_I350; 3887 break; 3888 3889 case e1000_i354: 3890 ivar_max = IGB_MAX_IVAR_I354; 3891 break; 3892 3893 case e1000_vfadapt: 3894 case e1000_vfadapt_i350: 3895 ivar_max = IGB_MAX_IVAR_VF; 3896 break; 3897 3898 case e1000_i210: 3899 ivar_max = IGB_MAX_IVAR_I210; 3900 break; 3901 3902 case e1000_i211: 3903 ivar_max = IGB_MAX_IVAR_I211; 3904 break; 3905 3906 default: 3907 panic("unknown mac type %d\n", sc->hw.mac.type); 3908 } 3909 for (i = 0; i < ivar_max; ++i) 3910 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 3911 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3912 } else { 3913 uint32_t tmp; 3914 3915 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 3916 ("82575 w/ MSI-X")); 3917 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 3918 tmp |= E1000_CTRL_EXT_IRCA; 3919 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 3920 } 3921 3922 /* 3923 * Map TX/RX interrupts to EICR 3924 */ 3925 switch (sc->hw.mac.type) { 3926 case e1000_82580: 3927 case e1000_i350: 3928 case e1000_i354: 3929 case e1000_vfadapt: 3930 case e1000_vfadapt_i350: 3931 case e1000_i210: 3932 case e1000_i211: 3933 /* RX entries */ 3934 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3935 rxr = &sc->rx_rings[i]; 3936 3937 index = i >> 1; 3938 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3939 3940 if (i & 1) { 3941 ivar &= 0xff00ffff; 3942 ivar |= 3943 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16; 3944 } else { 3945 ivar &= 0xffffff00; 3946 ivar |= 3947 (rxr->rx_intr_bit | E1000_IVAR_VALID); 3948 } 3949 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3950 } 3951 /* TX entries */ 3952 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3953 txr = &sc->tx_rings[i]; 3954 3955 index = i >> 1; 3956 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3957 3958 if (i & 1) { 3959 ivar &= 0x00ffffff; 3960 ivar |= 3961 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24; 3962 } else { 3963 ivar &= 0xffff00ff; 3964 ivar |= 3965 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8; 3966 } 3967 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3968 } 3969 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3970 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8; 3971 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3972 } 3973 break; 3974 3975 case e1000_82576: 3976 /* RX entries */ 3977 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3978 rxr = &sc->rx_rings[i]; 3979 3980 index = i & 0x7; /* Each IVAR has two entries */ 3981 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3982 3983 if (i < 8) { 3984 ivar &= 
static int
igb_setup_intr(struct igb_softc *sc)
{
	int error;

	if (sc->intr_type == PCI_INTR_TYPE_MSIX)
		return igb_msix_setup(sc);

	error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE,
	    (sc->flags & IGB_FLAG_SHARED_INTR) ? igb_intr_shared : igb_intr,
	    sc, &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(sc->dev,
		    "Failed to register interrupt handler\n");
		return error;
	}
	return 0;
}
static void
igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_bit0, int intr_bitmax)
{
	if (txr->sc->hw.mac.type == e1000_82575) {
		txr->tx_intr_bit = 0;	/* unused */
		switch (txr->me) {
		case 0:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
			break;
		case 1:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
			break;
		case 2:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
			break;
		case 3:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
			break;
		default:
			panic("unsupported TX ring %d", txr->me);
		}
	} else {
		int intr_bit = *intr_bit0;

		txr->tx_intr_bit = intr_bit % intr_bitmax;
		txr->tx_intr_mask = 1 << txr->tx_intr_bit;

		*intr_bit0 = intr_bit + 1;
	}
}

static void
igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_bit0, int intr_bitmax)
{
	if (rxr->sc->hw.mac.type == e1000_82575) {
		rxr->rx_intr_bit = 0;	/* unused */
		switch (rxr->me) {
		case 0:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
			break;
		case 1:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
			break;
		case 2:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
			break;
		case 3:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
			break;
		default:
			panic("unsupported RX ring %d", rxr->me);
		}
	} else {
		int intr_bit = *intr_bit0;

		rxr->rx_intr_bit = intr_bit % intr_bitmax;
		rxr->rx_intr_mask = 1 << rxr->rx_intr_bit;

		*intr_bit0 = intr_bit + 1;
	}
}

static void
igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
}

static void
igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
}

static int
igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
	    slz);
}

#ifdef INVARIANTS

static void
igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
	    slz, serialized);
}

#endif /* INVARIANTS */

static void
igb_set_intr_mask(struct igb_softc *sc)
{
	int i;

	sc->intr_mask = sc->sts_intr_mask;
	for (i = 0; i < sc->rx_ring_inuse; ++i)
		sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		sc->intr_mask |= sc->tx_rings[i].tx_intr_mask;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n",
		    sc->intr_mask);
	}
}
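/*
 * Worked example (non-82575, MSI or line interrupt): with 2 TX and
 * 2 RX rings, igb_alloc_intr() below hands out EICR bits in order --
 * TX ring0 -> bit0, TX ring1 -> bit1, RX ring0 -> bit2, RX ring1 ->
 * bit3 -- and status events use E1000_EICR_OTHER, so the mask built
 * here would be E1000_EICR_OTHER | 0x0000000f.  The modulo by
 * intr_bitmax above means rings simply share EICR bits once the
 * per-MAC bit limit is exceeded.
 */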
static int
igb_alloc_intr(struct igb_softc *sc)
{
	int i, intr_bit, intr_bitmax;
	u_int intr_flags;

	igb_msix_try_alloc(sc);
	if (sc->intr_type == PCI_INTR_TYPE_MSIX)
		goto done;

	/*
	 * Allocate MSI/legacy interrupt resource
	 */
	sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= IGB_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(sc->dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(sc->dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &sc->intr_rid, intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(sc->dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return ENXIO;
	}

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_rings[i].tx_intr_cpuid = rman_get_cpuid(sc->intr_res);

	/*
	 * Setup MSI/legacy interrupt mask
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		intr_bitmax = IGB_MAX_TXRXINT_82575;
		break;

	case e1000_82576:
		intr_bitmax = IGB_MAX_TXRXINT_82576;
		break;

	case e1000_82580:
		intr_bitmax = IGB_MAX_TXRXINT_82580;
		break;

	case e1000_i350:
		intr_bitmax = IGB_MAX_TXRXINT_I350;
		break;

	case e1000_i354:
		intr_bitmax = IGB_MAX_TXRXINT_I354;
		break;

	case e1000_i210:
		intr_bitmax = IGB_MAX_TXRXINT_I210;
		break;

	case e1000_i211:
		intr_bitmax = IGB_MAX_TXRXINT_I211;
		break;

	default:
		intr_bitmax = IGB_MIN_TXRXINT;
		break;
	}
	intr_bit = 0;
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_set_txintr_mask(&sc->tx_rings[i], &intr_bit, intr_bitmax);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_set_rxintr_mask(&sc->rx_rings[i], &intr_bit, intr_bitmax);
	sc->sts_intr_bit = 0;
	sc->sts_intr_mask = E1000_EICR_OTHER;

	/* Initialize interrupt rate */
	sc->intr_rate = IGB_INTR_RATE;
done:
	igb_set_ring_inuse(sc, FALSE);
	igb_set_intr_mask(sc);
	return 0;
}

static void
igb_free_intr(struct igb_softc *sc)
{
	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		if (sc->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    sc->intr_rid, sc->intr_res);
		}
		if (sc->intr_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->dev);
	} else {
		igb_msix_free(sc, TRUE);
	}
}

static void
igb_teardown_intr(struct igb_softc *sc)
{
	if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_tag);
	else
		igb_msix_teardown(sc, sc->msix_cnt);
}
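/*
 * Illustration only: device_getenv_int() consults per-device kernel
 * environment variables, so the knobs read by igb_alloc_intr() and
 * igb_msix_try_alloc() can be seeded from loader.conf.  Assuming the
 * conventional hw.<nameunit>.<knob> naming, something like:
 *
 *	hw.igb0.irq.unshared="1"	# don't share the legacy IRQ
 *	hw.igb0.msix.enable="0"		# force MSI/legacy fallback
 *	hw.igb0.msix.rxoff="2"		# shift RX MSI-X CPU binding
 *
 * The exact variable prefix depends on how device_getenv_int()
 * builds its key; treat these names as a sketch, not gospel.
 */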
static void
igb_msix_try_alloc(struct igb_softc *sc)
{
	int msix_enable, msix_cnt, msix_cnt2, alloc_cnt;
	int i, x, error;
	int offset, offset_def;
	struct igb_msix_data *msix;
	boolean_t aggregate, setup = FALSE;

	/*
	 * Don't enable MSI-X on 82575, see:
	 * 82575 specification update errata #25
	 */
	if (sc->hw.mac.type == e1000_82575)
		return;

	/* Don't enable MSI-X on VF */
	if (sc->vf_ifp)
		return;

	msix_enable = device_getenv_int(sc->dev, "msix.enable",
	    igb_msix_enable);
	if (!msix_enable)
		return;

	msix_cnt = pci_msix_count(sc->dev);
#ifdef IGB_MSIX_DEBUG
	msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt);
#endif
	if (msix_cnt <= 1) {
		/* One MSI-X model does not make sense */
		return;
	}

	/* Round the usable vector count down to a power of 2 */
	i = 0;
	while ((1 << (i + 1)) <= msix_cnt)
		++i;
	msix_cnt2 = 1 << i;

	if (bootverbose) {
		device_printf(sc->dev, "MSI-X count %d/%d\n",
		    msix_cnt2, msix_cnt);
	}

	KKASSERT(msix_cnt2 <= msix_cnt);
	if (msix_cnt == msix_cnt2) {
		/* We need at least one MSI-X for link status */
		msix_cnt2 >>= 1;
		if (msix_cnt2 <= 1) {
			/* One MSI-X for RX/TX does not make sense */
			device_printf(sc->dev, "not enough MSI-X for TX/RX, "
			    "MSI-X count %d/%d\n", msix_cnt2, msix_cnt);
			return;
		}
		KKASSERT(msix_cnt > msix_cnt2);

		if (bootverbose) {
			device_printf(sc->dev, "MSI-X count fixup %d/%d\n",
			    msix_cnt2, msix_cnt);
		}
	}

	sc->rx_ring_msix = sc->rx_ring_cnt;
	if (sc->rx_ring_msix > msix_cnt2)
		sc->rx_ring_msix = msix_cnt2;

	sc->tx_ring_msix = sc->tx_ring_cnt;
	if (sc->tx_ring_msix > msix_cnt2)
		sc->tx_ring_msix = msix_cnt2;

	if (msix_cnt >= sc->tx_ring_msix + sc->rx_ring_msix + 1) {
		/*
		 * Independent TX/RX MSI-X
		 */
		aggregate = FALSE;
		if (bootverbose)
			device_printf(sc->dev, "independent TX/RX MSI-X\n");
		alloc_cnt = sc->tx_ring_msix + sc->rx_ring_msix;
	} else {
		/*
		 * Aggregate TX/RX MSI-X
		 */
		aggregate = TRUE;
		if (bootverbose)
			device_printf(sc->dev, "aggregate TX/RX MSI-X\n");
		alloc_cnt = msix_cnt2;
		if (alloc_cnt > ncpus2)
			alloc_cnt = ncpus2;
		if (sc->rx_ring_msix > alloc_cnt)
			sc->rx_ring_msix = alloc_cnt;
		if (sc->tx_ring_msix > alloc_cnt)
			sc->tx_ring_msix = alloc_cnt;
	}
	++alloc_cnt;	/* For link status */

	if (bootverbose) {
		device_printf(sc->dev, "MSI-X alloc %d, "
		    "RX ring %d, TX ring %d\n", alloc_cnt,
		    sc->rx_ring_msix, sc->tx_ring_msix);
	}

	sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR);
	sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_mem_rid, RF_ACTIVE);
	if (sc->msix_mem_res == NULL) {
		sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR_ALT);
		sc->msix_mem_res = bus_alloc_resource_any(sc->dev,
		    SYS_RES_MEMORY, &sc->msix_mem_rid, RF_ACTIVE);
		if (sc->msix_mem_res == NULL) {
			device_printf(sc->dev, "Unable to map MSI-X table\n");
			return;
		}
	}

	sc->msix_cnt = alloc_cnt;
	sc->msix_data = kmalloc_cachealign(
	    sizeof(struct igb_msix_data) * sc->msix_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (x = 0; x < sc->msix_cnt; ++x) {
		msix = &sc->msix_data[x];

		lwkt_serialize_init(&msix->msix_serialize0);
		msix->msix_sc = sc;
		msix->msix_rid = -1;
		msix->msix_vector = x;
		msix->msix_mask = 1 << msix->msix_vector;
		msix->msix_rate = IGB_INTR_RATE;
	}

	x = 0;
	if (!aggregate) {
		/*
		 * RX rings
		 */
		if (sc->rx_ring_msix == ncpus2) {
			offset = 0;
		} else {
			offset_def = (sc->rx_ring_msix *
			    device_get_unit(sc->dev)) % ncpus2;

			offset = device_getenv_int(sc->dev,
			    "msix.rxoff", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->rx_ring_msix != 0) {
				device_printf(sc->dev,
				    "invalid msix.rxoff %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}
		igb_msix_rx_conf(sc, 0, &x, offset);

		/*
		 * TX rings
		 */
		if (sc->tx_ring_msix == ncpus2) {
			offset = 0;
		} else {
			offset_def = (sc->tx_ring_msix *
			    device_get_unit(sc->dev)) % ncpus2;

			offset = device_getenv_int(sc->dev,
			    "msix.txoff", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->tx_ring_msix != 0) {
				device_printf(sc->dev,
				    "invalid msix.txoff %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}
		igb_msix_tx_conf(sc, 0, &x, offset);
	} else {
		int ring_agg, ring_max;

		ring_agg = sc->rx_ring_msix;
		if (ring_agg > sc->tx_ring_msix)
			ring_agg = sc->tx_ring_msix;

		ring_max = sc->rx_ring_msix;
		if (ring_max < sc->tx_ring_msix)
			ring_max = sc->tx_ring_msix;

		if (ring_max == ncpus2) {
			offset = 0;
		} else {
			offset_def = (ring_max * device_get_unit(sc->dev)) %
			    ncpus2;

			offset = device_getenv_int(sc->dev, "msix.off",
			    offset_def);
			if (offset >= ncpus2 || offset % ring_max != 0) {
				device_printf(sc->dev,
				    "invalid msix.off %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}

		for (i = 0; i < ring_agg; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			KKASSERT(x < sc->msix_cnt);
			msix = &sc->msix_data[x++];

			txr->tx_intr_bit = msix->msix_vector;
			txr->tx_intr_mask = msix->msix_mask;
			rxr->rx_intr_bit = msix->msix_vector;
			rxr->rx_intr_mask = msix->msix_mask;

			msix->msix_serialize = &msix->msix_serialize0;
			msix->msix_func = igb_msix_rxtx;
			msix->msix_arg = msix;
			msix->msix_rx = rxr;
			msix->msix_tx = txr;

			msix->msix_cpuid = i + offset;
			KKASSERT(msix->msix_cpuid < ncpus2);
			txr->tx_intr_cpuid = msix->msix_cpuid;

			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
			    "%s rxtx%d", device_get_nameunit(sc->dev), i);
			msix->msix_rate = IGB_MSIX_RX_RATE;
			ksnprintf(msix->msix_rate_desc,
			    sizeof(msix->msix_rate_desc),
			    "RXTX%d interrupt rate", i);
		}

		if (ring_agg != ring_max) {
			if (ring_max == sc->tx_ring_msix)
				igb_msix_tx_conf(sc, i, &x, offset);
			else
				igb_msix_rx_conf(sc, i, &x, offset);
		}
	}
"msix.txoff", offset_def); 4454 if (offset >= ncpus2 || 4455 offset % sc->tx_ring_msix != 0) { 4456 device_printf(sc->dev, 4457 "invalid msix.txoff %d, use %d\n", 4458 offset, offset_def); 4459 offset = offset_def; 4460 } 4461 } 4462 igb_msix_tx_conf(sc, 0, &x, offset); 4463 } else { 4464 int ring_agg, ring_max; 4465 4466 ring_agg = sc->rx_ring_msix; 4467 if (ring_agg > sc->tx_ring_msix) 4468 ring_agg = sc->tx_ring_msix; 4469 4470 ring_max = sc->rx_ring_msix; 4471 if (ring_max < sc->tx_ring_msix) 4472 ring_max = sc->tx_ring_msix; 4473 4474 if (ring_max == ncpus2) { 4475 offset = 0; 4476 } else { 4477 offset_def = (ring_max * device_get_unit(sc->dev)) % 4478 ncpus2; 4479 4480 offset = device_getenv_int(sc->dev, "msix.off", 4481 offset_def); 4482 if (offset >= ncpus2 || offset % ring_max != 0) { 4483 device_printf(sc->dev, 4484 "invalid msix.off %d, use %d\n", 4485 offset, offset_def); 4486 offset = offset_def; 4487 } 4488 } 4489 4490 for (i = 0; i < ring_agg; ++i) { 4491 struct igb_tx_ring *txr = &sc->tx_rings[i]; 4492 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4493 4494 KKASSERT(x < sc->msix_cnt); 4495 msix = &sc->msix_data[x++]; 4496 4497 txr->tx_intr_bit = msix->msix_vector; 4498 txr->tx_intr_mask = msix->msix_mask; 4499 rxr->rx_intr_bit = msix->msix_vector; 4500 rxr->rx_intr_mask = msix->msix_mask; 4501 4502 msix->msix_serialize = &msix->msix_serialize0; 4503 msix->msix_func = igb_msix_rxtx; 4504 msix->msix_arg = msix; 4505 msix->msix_rx = rxr; 4506 msix->msix_tx = txr; 4507 4508 msix->msix_cpuid = i + offset; 4509 KKASSERT(msix->msix_cpuid < ncpus2); 4510 txr->tx_intr_cpuid = msix->msix_cpuid; 4511 4512 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), 4513 "%s rxtx%d", device_get_nameunit(sc->dev), i); 4514 msix->msix_rate = IGB_MSIX_RX_RATE; 4515 ksnprintf(msix->msix_rate_desc, 4516 sizeof(msix->msix_rate_desc), 4517 "RXTX%d interrupt rate", i); 4518 } 4519 4520 if (ring_agg != ring_max) { 4521 if (ring_max == sc->tx_ring_msix) 4522 igb_msix_tx_conf(sc, i, &x, offset); 4523 else 4524 igb_msix_rx_conf(sc, i, &x, offset); 4525 } 4526 } 4527 4528 /* 4529 * Link status 4530 */ 4531 KKASSERT(x < sc->msix_cnt); 4532 msix = &sc->msix_data[x++]; 4533 sc->sts_intr_bit = msix->msix_vector; 4534 sc->sts_intr_mask = msix->msix_mask; 4535 4536 msix->msix_serialize = &sc->main_serialize; 4537 msix->msix_func = igb_msix_status; 4538 msix->msix_arg = sc; 4539 msix->msix_cpuid = 0; 4540 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s sts", 4541 device_get_nameunit(sc->dev)); 4542 ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc), 4543 "status interrupt rate"); 4544 4545 KKASSERT(x == sc->msix_cnt); 4546 4547 error = pci_setup_msix(sc->dev); 4548 if (error) { 4549 device_printf(sc->dev, "Setup MSI-X failed\n"); 4550 goto back; 4551 } 4552 setup = TRUE; 4553 4554 for (i = 0; i < sc->msix_cnt; ++i) { 4555 msix = &sc->msix_data[i]; 4556 4557 error = pci_alloc_msix_vector(sc->dev, msix->msix_vector, 4558 &msix->msix_rid, msix->msix_cpuid); 4559 if (error) { 4560 device_printf(sc->dev, 4561 "Unable to allocate MSI-X %d on cpu%d\n", 4562 msix->msix_vector, msix->msix_cpuid); 4563 goto back; 4564 } 4565 4566 msix->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4567 &msix->msix_rid, RF_ACTIVE); 4568 if (msix->msix_res == NULL) { 4569 device_printf(sc->dev, 4570 "Unable to allocate MSI-X %d resource\n", 4571 msix->msix_vector); 4572 error = ENOMEM; 4573 goto back; 4574 } 4575 } 4576 4577 pci_enable_msix(sc->dev); 4578 sc->intr_type = PCI_INTR_TYPE_MSIX; 4579 back: 4580 if (error) 
static void
igb_msix_free(struct igb_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->msix_cnt > 1);

	for (i = 0; i < sc->msix_cnt; ++i) {
		struct igb_msix_data *msix = &sc->msix_data[i];

		if (msix->msix_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    msix->msix_rid, msix->msix_res);
		}
		if (msix->msix_rid >= 0)
			pci_release_msix_vector(sc->dev, msix->msix_rid);
	}
	if (setup)
		pci_teardown_msix(sc->dev);

	sc->msix_cnt = 0;
	kfree(sc->msix_data, M_DEVBUF);
	sc->msix_data = NULL;
}

static int
igb_msix_setup(struct igb_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_cnt; ++i) {
		struct igb_msix_data *msix = &sc->msix_data[i];
		int error;

		error = bus_setup_intr_descr(sc->dev, msix->msix_res,
		    INTR_MPSAFE, msix->msix_func, msix->msix_arg,
		    &msix->msix_handle, msix->msix_serialize,
		    msix->msix_desc);
		if (error) {
			device_printf(sc->dev, "could not set up %s "
			    "interrupt handler.\n", msix->msix_desc);
			igb_msix_teardown(sc, i);
			return error;
		}
	}
	return 0;
}

static void
igb_msix_teardown(struct igb_softc *sc, int msix_cnt)
{
	int i;

	for (i = 0; i < msix_cnt; ++i) {
		struct igb_msix_data *msix = &sc->msix_data[i];

		bus_teardown_intr(sc->dev, msix->msix_res, msix->msix_handle);
	}
}

static void
igb_msix_rx(void *arg)
{
	struct igb_rx_ring *rxr = arg;

	ASSERT_SERIALIZED(&rxr->rx_serialize);
	igb_rxeof(rxr, -1);

	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_msix_tx(void *arg)
{
	struct igb_tx_ring *txr = arg;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	igb_txeof(txr);
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);

	E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
}

static void
igb_msix_status(void *arg)
{
	struct igb_softc *sc = arg;
	uint32_t icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
}

static void
igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
{
	sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling);
	sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling);
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if,
		    "RX rings %d/%d, TX rings %d/%d\n",
		    sc->rx_ring_inuse, sc->rx_ring_cnt,
		    sc->tx_ring_inuse, sc->tx_ring_cnt);
	}
}

static int
igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWRSS(sc))
		return 1;

	if (polling)
		return sc->rx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING_RSS;
	else
		return sc->rx_ring_msix;
}

static int
igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWTSS(sc))
		return 1;

	if (polling)
		return sc->tx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING;
	else
		return sc->tx_ring_msix;
}
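/*
 * Summary of the ring-usage policy above: with ifpoll all configured
 * rings are used, since polling consumes no interrupt vectors; with
 * MSI-X the usable count is bounded by the vectors actually
 * allocated (rx_ring_msix/tx_ring_msix); and with MSI or a legacy
 * line interrupt the driver shrinks to the IGB_MIN_RING(_RSS)
 * minimum, because a single vector cannot usefully fan out across
 * many rings.
 */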
static int
igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) {
		struct ip *ip;

		ip = mtodoff(m, struct ip *, hoff);
		ip->ip_len = 0;
	}

	return 0;
}
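/*
 * Worked example: for a TSO frame with a plain Ethernet header (14
 * bytes), a 20-byte IPv4 header and a 20-byte TCP header without
 * options, hoff + iphlen + thoff == 54, so igb_tso_pullup() only
 * calls m_pullup() when the leading mbuf holds fewer than 54 bytes.
 * Keeping the whole header stack contiguous lets igb_tso_ctx() below
 * read and describe it without chasing the mbuf chain.
 */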
static void
igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen)
{
	struct e1000_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	int hoff, ctxd, iphlen, thoff;

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;

	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	if (m->m_flags & M_VLANTAG) {
		uint16_t vlantag;

		vlantag = htole16(m->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= iphlen;

	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;

	mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT);

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;

	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	*hlen = hoff + iphlen + thoff;
}

static void
igb_setup_serializer(struct igb_softc *sc)
{
	const struct igb_msix_data *msix;
	int i, j;

	/*
	 * Allocate serializer array
	 */

	/* Main + TX + RX */
	sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;

	/* Aggregate TX/RX MSI-X */
	for (i = 0; i < sc->msix_cnt; ++i) {
		msix = &sc->msix_data[i];
		if (msix->msix_serialize == &msix->msix_serialize0)
			sc->serialize_cnt++;
	}

	sc->serializes =
	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializers
	 *
	 * NOTE: Order is critical
	 */

	i = 0;

	KKASSERT(i < sc->serialize_cnt);
	sc->serializes[i++] = &sc->main_serialize;

	for (j = 0; j < sc->msix_cnt; ++j) {
		msix = &sc->msix_data[j];
		if (msix->msix_serialize == &msix->msix_serialize0) {
			KKASSERT(i < sc->serialize_cnt);
			sc->serializes[i++] = msix->msix_serialize;
		}
	}

	for (j = 0; j < sc->tx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
	}

	for (j = 0; j < sc->rx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
	}

	KKASSERT(i == sc->serialize_cnt);
}
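/*
 * Resulting array layout, e.g. for 2 TX rings, 2 RX rings and 2
 * aggregate rxtx MSI-X vectors:
 *
 *	serializes[0] = main_serialize
 *	serializes[1] = msix_serialize0 (rxtx0)
 *	serializes[2] = msix_serialize0 (rxtx1)
 *	serializes[3] = tx_rings[0].tx_serialize
 *	serializes[4] = tx_rings[1].tx_serialize
 *	serializes[5] = rx_rings[0].rx_serialize
 *	serializes[6] = rx_rings[1].rx_serialize
 *
 * ifnet_serialize_array_enter() acquires these in array order, which
 * is why the ordering is critical: every path that takes more than
 * one serializer must take them in the same global order, or two
 * CPUs could deadlock acquiring them in opposite orders.
 */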
static void
igb_msix_rx_conf(struct igb_softc *sc, int i, int *x0, int offset)
{
	int x = *x0;

	for (; i < sc->rx_ring_msix; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		struct igb_msix_data *msix;

		KKASSERT(x < sc->msix_cnt);
		msix = &sc->msix_data[x++];

		rxr->rx_intr_bit = msix->msix_vector;
		rxr->rx_intr_mask = msix->msix_mask;

		msix->msix_serialize = &rxr->rx_serialize;
		msix->msix_func = igb_msix_rx;
		msix->msix_arg = rxr;

		msix->msix_cpuid = i + offset;
		KKASSERT(msix->msix_cpuid < ncpus2);

		ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s rx%d",
		    device_get_nameunit(sc->dev), i);

		msix->msix_rate = IGB_MSIX_RX_RATE;
		ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
		    "RX%d interrupt rate", i);
	}
	*x0 = x;
}

static void
igb_msix_tx_conf(struct igb_softc *sc, int i, int *x0, int offset)
{
	int x = *x0;

	for (; i < sc->tx_ring_msix; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		struct igb_msix_data *msix;

		KKASSERT(x < sc->msix_cnt);
		msix = &sc->msix_data[x++];

		txr->tx_intr_bit = msix->msix_vector;
		txr->tx_intr_mask = msix->msix_mask;

		msix->msix_serialize = &txr->tx_serialize;
		msix->msix_func = igb_msix_tx;
		msix->msix_arg = txr;

		msix->msix_cpuid = i + offset;
		KKASSERT(msix->msix_cpuid < ncpus2);
		txr->tx_intr_cpuid = msix->msix_cpuid;

		ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s tx%d",
		    device_get_nameunit(sc->dev), i);

		msix->msix_rate = IGB_MSIX_TX_RATE;
		ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
		    "TX%d interrupt rate", i);
	}
	*x0 = x;
}

static void
igb_msix_rxtx(void *arg)
{
	struct igb_msix_data *msix = arg;
	struct igb_rx_ring *rxr = msix->msix_rx;
	struct igb_tx_ring *txr = msix->msix_tx;

	ASSERT_SERIALIZED(&msix->msix_serialize0);

	lwkt_serialize_enter(&rxr->rx_serialize);
	igb_rxeof(rxr, -1);
	lwkt_serialize_exit(&rxr->rx_serialize);

	lwkt_serialize_enter(&txr->tx_serialize);
	igb_txeof(txr);
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);
	lwkt_serialize_exit(&txr->tx_serialize);

	E1000_WRITE_REG(&msix->msix_sc->hw, E1000_EIMS, msix->msix_mask);
}

static void
igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling)
{
	if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
		sc->timer_cpuid = 0;	/* XXX fixed */
	else
		sc->timer_cpuid = rman_get_cpuid(sc->intr_res);
}