/*
 * Copyright (c) 2001-2013, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_igb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/igb/if_igb.h>

#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */

#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id)	\
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }
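/*
 * Illustrative expansion: IGB_DEVICE(82575EB_COPPER) pastes and
 * stringizes its argument, yielding
 *   { IGB_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER,
 *     "Intel(R) PRO/1000 82575EB_COPPER" }
 */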
static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),
	IGB_DEVICE(I350_VF),
	IGB_DEVICE(I210_COPPER),
	IGB_DEVICE(I210_COPPER_IT),
	IGB_DEVICE(I210_COPPER_OEM1),
	IGB_DEVICE(I210_COPPER_FLASHLESS),
	IGB_DEVICE(I210_SERDES_FLASHLESS),
	IGB_DEVICE(I210_FIBER),
	IGB_DEVICE(I210_SERDES),
	IGB_DEVICE(I210_SGMII),
	IGB_DEVICE(I211_COPPER),
	IGB_DEVICE(I354_BACKPLANE_1GBPS),
	IGB_DEVICE(I354_BACKPLANE_2_5GBPS),
	IGB_DEVICE(I354_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};

static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static void	igb_add_intr_rate_sysctl(struct igb_softc *, int,
		    const char *, const char *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
static int	igb_get_rxring_inuse(const struct igb_softc *, boolean_t);
static int	igb_get_txring_inuse(const struct igb_softc *, boolean_t);
static void	igb_set_timer_cpuid(struct igb_softc *, boolean_t);

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *, boolean_t);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *, boolean_t);
static void	igb_init_dmac(struct igb_softc *, uint32_t);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);
static int	igb_get_ring_max(const struct igb_softc *);
static void	igb_get_rxring_cnt(const struct igb_softc *, int *, int *);
static void	igb_get_txring_cnt(const struct igb_softc *, int *, int *);
static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);
static void	igb_setup_serialize(struct igb_softc *);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifaltq_subque *);
static void	igb_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *, int);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *, int);
static void	igb_alloc_msix(struct igb_softc *);
static void	igb_free_msix(struct igb_softc *, boolean_t);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);
static void	igb_msix_rxtx(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);
static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	DEVMETHOD_END
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_txr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */

static char	igb_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.txr", &igb_txr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_STR("hw.igb.flow_ctrl", igb_flowctrl, sizeof(igb_flowctrl));

/* i350 specific */
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);

static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}

static int
igb_get_ring_max(const struct igb_softc *sc)
{

	switch (sc->hw.mac.type) {
	case e1000_82575:
		return (IGB_MAX_RING_82575);

	case e1000_82576:
		return (IGB_MAX_RING_82576);

	case e1000_82580:
		return (IGB_MAX_RING_82580);

	case e1000_i350:
		return (IGB_MAX_RING_I350);

	case e1000_i354:
		return (IGB_MAX_RING_I354);

	case e1000_i210:
		return (IGB_MAX_RING_I210);

	case e1000_i211:
		return (IGB_MAX_RING_I211);

	default:
		return (IGB_MIN_RING);
	}
}
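/*
 * Note: the "rxr"/"txr" hints resolved below go through
 * device_getenv_int(), which prefers a per-device tunable (e.g.
 * hw.igb0.rxr) and falls back to the global hw.igb.rxr / hw.igb.txr
 * values; both default to 0, which lets the ringmap code size the
 * ring count itself.
 */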
static void
igb_get_rxring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max)
{

	*ring_max = igb_get_ring_max(sc);
	*ring_cnt = device_getenv_int(sc->dev, "rxr", igb_rxr);
}

static void
igb_get_txring_cnt(const struct igb_softc *sc, int *ring_cnt, int *ring_max)
{

	*ring_max = igb_get_ring_max(sc);
	*ring_cnt = device_getenv_int(sc->dev, "txr", igb_txr);
}

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, ring_max, ring_cnt;
	char flowctrl[IFM_ETH_FC_STRLEN];

#ifdef notyet
	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
#endif

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    igb_media_change, igb_media_status);
	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	igb_get_rxring_cnt(sc, &ring_cnt, &ring_max);
	sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max);
	igb_get_txring_cnt(sc, &ring_cnt, &ring_max);
	sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, ring_max);
	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);

	sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
	sc->rx_ring_inuse = sc->rx_ring_cnt;
	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);
	sc->tx_ring_inuse = sc->tx_ring_cnt;
	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    igb_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/* Setup serializes */
	igb_setup_serialize(sc);

	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		if (sc->hw.phy.media_type == e1000_media_type_copper) {
			if (sc->hw.mac.type == e1000_i354)
				e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
			else
				e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
		}
	}
	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address from it.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (sc->hw.mac.type != e1000_i210 && sc->hw.mac.type != e1000_i211 &&
	    e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree; must be after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc, FALSE);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif
	/*
	 * Disable interrupts to prevent spurious interrupts (line-based
	 * interrupt, MSI or even MSI-X), which have been observed on
	 * several types of LOMs, from being handled.
	 */
	igb_disable_intr(sc);

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		igb_teardown_intr(sc, sc->intr_cnt);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);
	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);
	if (sc->rx_rmap != NULL)
		if_ringmap_free(sc->rx_rmap);
	if (sc->rx_rmap_intr != NULL)
		if_ringmap_free(sc->rx_rmap_intr);
	if (sc->tx_rmap != NULL)
		if_ringmap_free(sc->tx_rmap);
	if (sc->tx_rmap_intr != NULL)
		if_ringmap_free(sc->tx_rmap_intr);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;
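	/*
	 * Note on the SIOCSIFMTU bound above: with max_frame_size capped
	 * at 9234 bytes, the largest MTU accepted is
	 * 9234 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 9216 bytes.
	 */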
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest mac address; the user can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc, FALSE);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configure the RX/TX rings to be used */
	igb_set_ring_inuse(sc, polling);
	ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc, polling);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
	}

	igb_set_timer_cpuid(sc, polling);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		if (sc->hw.mac.type == e1000_i354)
			e1000_set_eee_i354(&sc->hw, TRUE, TRUE);
		else
			e1000_set_eee_i350(&sc->hw, TRUE, TRUE);
	}
}
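/*
 * Note: when if_poll drives the interface (IFF_NPOLLING set above),
 * igb_init() leaves device interrupts disabled; the RX/TX rings are
 * then serviced by the igb_npoll_*() handlers instead of the ISRs.
 */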
static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;

	case 100:
		/*
		 * Support for 100Mb SFP - these are Fiber
		 * but the media type appears as serdes
		 */
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_100_FX;
		else
			ifmr->ifm_active |= IFM_100_TX;
		break;

	case 1000:
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_1000_SX;
		else
			ifmr->ifm_active |= IFM_1000_T;
		break;

	case 2500:
		ifmr->ifm_active |= IFM_2500_SX;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}

static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		igb_init(sc);

	return 0;
}

static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	int mcnt = 0;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	if (ifp->if_flags & IFF_ALLMULTI) {
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	} else {
		struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
	}
	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}
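/*
 * Note on igb_set_multi() above: sc->mta is a flat array of
 * MAX_NUM_MULTICAST_ADDRESSES entries of ETH_ADDR_LEN (6) bytes each;
 * e1000_update_mc_addr_list() consumes that packed layout directly.
 * Once the list overflows, the hardware is simply switched to
 * multicast-promiscuous mode (RCTL.MPE).
 */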
static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALL THROUGH */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			/* Get the flow control for display */
			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));

			if_printf(ifp, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex",
			    flowctrl);
		}
		if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
			e1000_force_flowctrl(hw, sc->ifm_flowctrl);
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* Delay Link Up for Phy update */
		if ((hw->mac.type == e1000_i210 ||
		     hw->mac.type == e1000_i211) &&
		    hw->phy.id == I210_I_PHY_ID)
			msec_delay(IGB_I210_LINK_DELAY);
		/*
		 * Reset if the media type changed.
		 * Support AutoMediaDetect for Marvell M88 PHY in i354.
		 */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = FALSE;
			igb_reset(sc, TRUE);
		}
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
		sc->tx_rings[i].tx_flags &= ~IGB_TXFLAG_ENABLED;
	}

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}

static void
igb_reset(struct igb_softc *sc, boolean_t media_reset)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;

	case e1000_i210:
	case e1000_i211:
		pba = E1000_PBA_34K;
		break;

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high
	 *   water mark.  This allows the receiver to restart by sending
	 *   XON when it has drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
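	/*
	 * Worked example (illustrative, i210/i211): pba is 34 (KB), so
	 * pba << 10 = 34816 bytes.  With the default 1518-byte
	 * max_frame_size:
	 *   34816 * 9 / 10   = 31334
	 *   34816 - 2 * 1518 = 31780
	 * hwm = min(31334, 31780) = 31334; masked to 16-byte granularity
	 * that yields high_water = 31328 and low_water = 31312.
	 */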
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
	fc->requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Reset for AutoMediaDetect */
	if (media_reset) {
		e1000_setup_init_funcs(hw, TRUE);
		e1000_get_bus_info(hw);
	}

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	igb_init_dmac(sc, pba);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif

	ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_rings[0].num_rx_desc;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_modulo;
	ifq_set_subq_divisor(&ifp->if_snd, 1);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog);
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
}
static void
igb_add_sysctl(struct igb_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
#if defined(IGB_RSS_DEBUG) || defined(IGB_TSS_DEBUG)
	char node[32];
	int i;
#endif

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

#define IGB_ADD_INTR_RATE_SYSCTL(sc, use, name) \
do { \
	igb_add_intr_rate_sysctl(sc, IGB_INTR_USE_##use, #name "_intr_rate", \
	    #use " interrupt rate"); \
} while (0)

	IGB_ADD_INTR_RATE_SYSCTL(sc, RXTX, rxtx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, RX, rx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, TX, tx);
	IGB_ADD_INTR_RATE_SYSCTL(sc, STATUS, sts);

#undef IGB_ADD_INTR_RATE_SYSCTL

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_wreg_nsegs, "I",
	    "# of segments sent before write to hardware register");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_rx_wreg_nsegs, "I",
	    "# of segments received before write to hardware register");

	if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
		    OID_AUTO, "tx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->tx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I",
		    "TX MSI-X CPU map");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
		    OID_AUTO, "rx_msix_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->rx_rmap_intr, 0, if_ringmap_cpumap_sysctl, "I",
		    "RX MSI-X CPU map");
	}
#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "TX polling CPU map");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "RX polling CPU map");
#endif

#ifdef IGB_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
	    "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx,
		    SYSCTL_CHILDREN(tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets");
	}
#endif
#ifdef IGB_TSS_DEBUG
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "tx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx,
		    SYSCTL_CHILDREN(tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->tx_rings[i].tx_packets, "TXed packets");
	}
#endif
}
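/*
 * Illustrative usage: the nodes registered above surface under the
 * device's sysctl tree, e.g.
 *   sysctl dev.igb.0.rxr
 *   sysctl dev.igb.0.tx_wreg_nsegs=16
 */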
static int
igb_alloc_rings(struct igb_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &sc->parent_tag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate TX descriptor rings and buffers
	 */
	sc->tx_rings = kmalloc_cachealign(
	    sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		txr->sc = sc;
		txr->me = i;
		txr->tx_intr_cpuid = -1;
		lwkt_serialize_init(&txr->tx_serialize);

		error = igb_create_tx_ring(txr);
		if (error)
			return error;
	}

	/*
	 * Allocate RX descriptor rings and buffers
	 */
	sc->rx_rings = kmalloc_cachealign(
	    sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		rxr->sc = sc;
		rxr->me = i;
		lwkt_serialize_init(&rxr->rx_serialize);

		error = igb_create_rx_ring(rxr);
		if (error)
			return error;
	}

	return 0;
}

static void
igb_free_rings(struct igb_softc *sc)
{
	int i;

	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			igb_destroy_tx_ring(txr, txr->num_tx_desc);
		}
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
		}
		kfree(sc->rx_rings, M_DEVBUF);
	}
}
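/*
 * Illustrative check performed by the ring constructors below:
 * assuming IGB_DBA_ALIGN is 128 and a descriptor is 16 bytes,
 * "ntxd * 16 % 128 == 0" means the requested count must be a
 * multiple of 8; out-of-range or misaligned requests fall back to
 * IGB_DEFAULT_TXD/IGB_DEFAULT_RXD.
 */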
static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i, ntxd;

	/*
	 * Validate number of transmit descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 ||
	    ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, ntxd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = ntxd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	if (txr->sc->hw.mac.type == e1000_82575)
		txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0;

	/*
	 * Initialize various watermarks
	 */
	txr->intr_nsegs = txr->num_tx_desc / 16;
	txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS;

	return 0;
}

static void
igb_free_tx_ring(struct igb_tx_ring *txr)
{
	int i;

	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
	}
}

static void
igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
{
	int i;

	if (txr->txdma.dma_vaddr != NULL) {
		bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
		bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
		    txr->txdma.dma_map);
		bus_dma_tag_destroy(txr->txdma.dma_tag);
		txr->txdma.dma_vaddr = NULL;
	}

	if (txr->tx_hdr != NULL) {
		bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
		bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
		    txr->tx_hdr_dmap);
		bus_dma_tag_destroy(txr->tx_hdr_dtag);
		txr->tx_hdr = NULL;
	}

	if (txr->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		KKASSERT(txbuf->m_head == NULL);
		bus_dmamap_destroy(txr->tx_tag, txbuf->map);
	}
	bus_dma_tag_destroy(txr->tx_tag);

	kfree(txr->tx_buf, M_DEVBUF);
	txr->tx_buf = NULL;
}

static void
igb_init_tx_ring(struct igb_tx_ring *txr)
{
	/* Clear the old descriptor contents */
	bzero(txr->tx_base,
	    sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);

	/* Clear TX head write-back buffer */
	*(txr->tx_hdr) = 0;

	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;
	txr->tx_nsegs = 0;

	/* Set number of descriptors available */
	txr->tx_avail = txr->num_tx_desc;

	/* Enable this TX ring */
	txr->tx_flags |= IGB_TXFLAG_ENABLED;
}
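/*
 * Note (sketch of the mechanism): rather than having the driver read
 * TDH, igb_init_tx_unit() below points TDWBAL/TDWBAH at *txr->tx_hdr
 * and sets E1000_TX_HEAD_WB_ENABLE, so the hardware DMAs the consumed
 * descriptor index into host memory; igb_txeof() then walks
 * next_to_clean up to that snapshot.
 */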
static void
igb_init_tx_unit(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		uint64_t bus_addr = txr->txdma.dma_paddr;
		uint64_t hdr_paddr = txr->tx_hdr_paddr;
		uint32_t txdctl = 0;
		uint32_t dca_txctrl;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    txr->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
		dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);

		/*
		 * Don't set WB_on_EITR:
		 * - 82575 does not have it
		 * - It almost has no effect on 82576, see:
		 *   82576 specification update errata #26
		 * - It causes unnecessary bus traffic
		 */
		E1000_WRITE_REG(hw, E1000_TDWBAH(i),
		    (uint32_t)(hdr_paddr >> 32));
		E1000_WRITE_REG(hw, E1000_TDWBAL(i),
		    ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);

		/*
		 * WTHRESH is ignored by the hardware, since header
		 * write back mode is used.
		 */
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}

	if (sc->vf_ifp)
		return;

	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
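/*
 * Note on igb_txcsum_ctx() below: checksum offload with advanced
 * descriptors is programmed through a separate "context descriptor"
 * that carries the header lengths and L4 type; it occupies one slot
 * of the TX ring (hence the --tx_avail) but transmits no data itself.
 */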
 */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}

static boolean_t
igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp)
{
	struct e1000_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	int ehdrlen, ctxd, ip_hlen = 0;
	boolean_t offload = TRUE;

	if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0)
		offload = FALSE;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;

	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * With advanced descriptors the VLAN tag must be placed into
	 * the context descriptor, so we may end up here solely for
	 * that setup.
	 */
	if (mp->m_flags & M_VLANTAG) {
		uint16_t vlantag;

		vlantag = htole16(mp->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (!offload) {
		return FALSE;
	}

	ehdrlen = mp->m_pkthdr.csum_lhlen;
	KASSERT(ehdrlen > 0, ("invalid ether hlen"));

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	if (mp->m_pkthdr.csum_flags & CSUM_IP) {
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		ip_hlen = mp->m_pkthdr.csum_iphlen;
		KASSERT(ip_hlen > 0, ("invalid ip hlen"));
	}
	vlan_macip_lens |= ip_hlen;

	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	if (mp->m_pkthdr.csum_flags & CSUM_TCP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as the TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return offload;
}

static void
igb_txeof(struct igb_tx_ring *txr, int hdr)
{
	int first, avail;

	if (txr->tx_avail == txr->num_tx_desc)
		return;

	first = txr->next_to_clean;
	if (first == hdr)
		return;

	avail = txr->tx_avail;
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		++avail;
		if (txbuf->m_head) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		if (++first == txr->num_tx_desc)
			first = 0;
	}
	txr->next_to_clean = first;
	txr->tx_avail = avail;

	/*
	 * If we have at least the minimum number of free descriptors,
	 * clear OACTIVE to tell the stack that it is OK to send packets.
	 */
	if (txr->tx_avail > IGB_MAX_SCATTER + IGB_TX_RESERVED) {
		ifsq_clr_oactive(txr->ifsq);

		/*
		 * We have enough TX descriptors, turn off
		 * the watchdog.  We allow a small number of
		 * packets (roughly intr_nsegs) to remain pending
		 * on the transmit ring.
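		 * (A TX interrupt is requested only about every
		 * intr_nsegs descriptors, so a partially cleaned
		 * ring is expected here.)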
		 */
		txr->tx_watchdog.wd_timer = 0;
	}
}

static int
igb_create_rx_ring(struct igb_rx_ring *rxr)
{
	int rsize, i, error, nrxd;

	/*
	 * Validate the number of receive descriptors.  It must not
	 * exceed the hardware maximum, and the ring size must be a
	 * multiple of IGB_DBA_ALIGN.
	 */
	nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd);
	if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 ||
	    nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, nrxd);
		rxr->num_rx_desc = IGB_DEFAULT_RXD;
	} else {
		rxr->num_rx_desc = nrxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RX descriptor memory\n");
		return ENOMEM;
	}
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_rx_buf) * rxr->num_rx_desc);
	rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES,		/* maxsize */
	    1,			/* nsegments */
	    MCLBYTES,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA map\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA map\n");
			igb_destroy_rx_ring(rxr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS;

	return 0;
}

static void
igb_free_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}

	if (rxr->fmp != NULL)
		m_freem(rxr->fmp);
	rxr->fmp = NULL;
	rxr->lmp = NULL;
}

static void
igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rxdma.dma_vaddr != NULL) {
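		/*
		 * Tear down in the reverse order of creation: unload
		 * the map, free the DMA memory, then destroy the tag.
		 */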
bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map); 2254 bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr, 2255 rxr->rxdma.dma_map); 2256 bus_dma_tag_destroy(rxr->rxdma.dma_tag); 2257 rxr->rxdma.dma_vaddr = NULL; 2258 } 2259 2260 if (rxr->rx_buf == NULL) 2261 return; 2262 2263 for (i = 0; i < ndesc; ++i) { 2264 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2265 2266 KKASSERT(rxbuf->m_head == NULL); 2267 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map); 2268 } 2269 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap); 2270 bus_dma_tag_destroy(rxr->rx_tag); 2271 2272 kfree(rxr->rx_buf, M_DEVBUF); 2273 rxr->rx_buf = NULL; 2274 } 2275 2276 static void 2277 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf) 2278 { 2279 rxd->read.pkt_addr = htole64(rxbuf->paddr); 2280 rxd->wb.upper.status_error = 0; 2281 } 2282 2283 static int 2284 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait) 2285 { 2286 struct mbuf *m; 2287 bus_dma_segment_t seg; 2288 bus_dmamap_t map; 2289 struct igb_rx_buf *rxbuf; 2290 int error, nseg; 2291 2292 m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2293 if (m == NULL) { 2294 if (wait) { 2295 if_printf(&rxr->sc->arpcom.ac_if, 2296 "Unable to allocate RX mbuf\n"); 2297 } 2298 return ENOBUFS; 2299 } 2300 m->m_len = m->m_pkthdr.len = MCLBYTES; 2301 2302 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2303 m_adj(m, ETHER_ALIGN); 2304 2305 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2306 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2307 if (error) { 2308 m_freem(m); 2309 if (wait) { 2310 if_printf(&rxr->sc->arpcom.ac_if, 2311 "Unable to load RX mbuf\n"); 2312 } 2313 return error; 2314 } 2315 2316 rxbuf = &rxr->rx_buf[i]; 2317 if (rxbuf->m_head != NULL) 2318 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2319 2320 map = rxbuf->map; 2321 rxbuf->map = rxr->rx_sparemap; 2322 rxr->rx_sparemap = map; 2323 2324 rxbuf->m_head = m; 2325 rxbuf->paddr = seg.ds_addr; 2326 2327 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2328 return 0; 2329 } 2330 2331 static int 2332 igb_init_rx_ring(struct igb_rx_ring *rxr) 2333 { 2334 int i; 2335 2336 /* Clear the ring contents */ 2337 bzero(rxr->rx_base, 2338 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2339 2340 /* Now replenish the ring mbufs */ 2341 for (i = 0; i < rxr->num_rx_desc; ++i) { 2342 int error; 2343 2344 error = igb_newbuf(rxr, i, TRUE); 2345 if (error) 2346 return error; 2347 } 2348 2349 /* Setup our descriptor indices */ 2350 rxr->next_to_check = 0; 2351 2352 rxr->fmp = NULL; 2353 rxr->lmp = NULL; 2354 rxr->discard = FALSE; 2355 2356 return 0; 2357 } 2358 2359 static void 2360 igb_init_rx_unit(struct igb_softc *sc, boolean_t polling) 2361 { 2362 struct ifnet *ifp = &sc->arpcom.ac_if; 2363 struct e1000_hw *hw = &sc->hw; 2364 uint32_t rctl, rxcsum, srrctl = 0; 2365 int i; 2366 2367 /* 2368 * Make sure receives are disabled while setting 2369 * up the descriptor ring 2370 */ 2371 rctl = E1000_READ_REG(hw, E1000_RCTL); 2372 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2373 2374 #if 0 2375 /* 2376 ** Set up for header split 2377 */ 2378 if (igb_header_split) { 2379 /* Use a standard mbuf for the header */ 2380 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2381 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2382 } else 2383 #endif 2384 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2385 2386 /* 2387 ** Set up for jumbo frames 2388 */ 2389 if (ifp->if_mtu > ETHERMTU) { 2390 rctl |= E1000_RCTL_LPE; 2391 #if 0 2392 if (adapter->rx_mbuf_sz == 
	    MJUMPAGESIZE) {
			srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
			srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		}
		/* Set maximum packet len */
		psize = adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
#else
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
#endif
	} else {
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
	}

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		uint64_t bus_addr = rxr->rxdma.dma_paddr;
		uint32_t rxdctl;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		/*
		 * Don't set WTHRESH to a value above 1 on 82576, see:
		 * 82576 specification update errata #26
		 */
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
	rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);

	/*
	 * Receive Checksum Offload for TCP and UDP
	 *
	 * Checksum offloading is also enabled if multiple receive
	 * queues are to be supported, since we need it to figure
	 * out fragments.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD;
	} else {
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD);
	}
	E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);

	if (sc->rx_ring_inuse > 1) {
		uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
		const struct if_ringmap *rm;
		uint32_t reta_shift;
		int j, r;

		/*
		 * NOTE:
		 * When we reach here, RSS has already been disabled
		 * in igb_stop(), so we can safely configure the RSS
		 * key and redirect table.
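		 * (The MRQC write at the end of this block is what
		 * actually re-enables RSS.)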
2477 */ 2478 2479 /* 2480 * Configure RSS key 2481 */ 2482 toeplitz_get_key(key, sizeof(key)); 2483 for (i = 0; i < IGB_NRSSRK; ++i) { 2484 uint32_t rssrk; 2485 2486 rssrk = IGB_RSSRK_VAL(key, i); 2487 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2488 2489 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk); 2490 } 2491 2492 /* 2493 * Configure RSS redirect table 2494 */ 2495 if (polling) 2496 rm = sc->rx_rmap; 2497 else 2498 rm = sc->rx_rmap_intr; 2499 if_ringmap_rdrtable(rm, sc->rdr_table, IGB_RDRTABLE_SIZE); 2500 2501 reta_shift = IGB_RETA_SHIFT; 2502 if (hw->mac.type == e1000_82575) 2503 reta_shift = IGB_RETA_SHIFT_82575; 2504 2505 r = 0; 2506 for (j = 0; j < IGB_NRETA; ++j) { 2507 uint32_t reta = 0; 2508 2509 for (i = 0; i < IGB_RETA_SIZE; ++i) { 2510 uint32_t q; 2511 2512 q = sc->rdr_table[r] << reta_shift; 2513 reta |= q << (8 * i); 2514 ++r; 2515 } 2516 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2517 E1000_WRITE_REG(hw, E1000_RETA(j), reta); 2518 } 2519 2520 /* 2521 * Enable multiple receive queues. 2522 * Enable IPv4 RSS standard hash functions. 2523 * Disable RSS interrupt on 82575 2524 */ 2525 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2526 E1000_MRQC_ENABLE_RSS_4Q | 2527 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2528 E1000_MRQC_RSS_FIELD_IPV4); 2529 } 2530 2531 /* Setup the Receive Control Register */ 2532 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2533 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2534 E1000_RCTL_RDMTS_HALF | 2535 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2536 /* Strip CRC bytes. */ 2537 rctl |= E1000_RCTL_SECRC; 2538 /* Make sure VLAN Filters are off */ 2539 rctl &= ~E1000_RCTL_VFE; 2540 /* Don't store bad packets */ 2541 rctl &= ~E1000_RCTL_SBP; 2542 2543 /* Enable Receives */ 2544 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2545 2546 /* 2547 * Setup the HW Rx Head and Tail Descriptor Pointers 2548 * - needs to be after enable 2549 */ 2550 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2551 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2552 2553 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2554 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1); 2555 } 2556 } 2557 2558 static void 2559 igb_rx_refresh(struct igb_rx_ring *rxr, int i) 2560 { 2561 if (--i < 0) 2562 i = rxr->num_rx_desc - 1; 2563 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2564 } 2565 2566 static void 2567 igb_rxeof(struct igb_rx_ring *rxr, int count) 2568 { 2569 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2570 union e1000_adv_rx_desc *cur; 2571 uint32_t staterr; 2572 int i, ncoll = 0, cpuid = mycpuid; 2573 2574 i = rxr->next_to_check; 2575 cur = &rxr->rx_base[i]; 2576 staterr = le32toh(cur->wb.upper.status_error); 2577 2578 if ((staterr & E1000_RXD_STAT_DD) == 0) 2579 return; 2580 2581 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2582 struct pktinfo *pi = NULL, pi0; 2583 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2584 struct mbuf *m = NULL; 2585 boolean_t eop; 2586 2587 eop = (staterr & E1000_RXD_STAT_EOP) ? 
TRUE : FALSE; 2588 if (eop) 2589 --count; 2590 2591 ++ncoll; 2592 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2593 !rxr->discard) { 2594 struct mbuf *mp = rxbuf->m_head; 2595 uint32_t hash, hashtype; 2596 uint16_t vlan; 2597 int len; 2598 2599 len = le16toh(cur->wb.upper.length); 2600 if ((rxr->sc->hw.mac.type == e1000_i350 || 2601 rxr->sc->hw.mac.type == e1000_i354) && 2602 (staterr & E1000_RXDEXT_STATERR_LB)) 2603 vlan = be16toh(cur->wb.upper.vlan); 2604 else 2605 vlan = le16toh(cur->wb.upper.vlan); 2606 2607 hash = le32toh(cur->wb.lower.hi_dword.rss); 2608 hashtype = le32toh(cur->wb.lower.lo_dword.data) & 2609 E1000_RXDADV_RSSTYPE_MASK; 2610 2611 IGB_RSS_DPRINTF(rxr->sc, 10, 2612 "ring%d, hash 0x%08x, hashtype %u\n", 2613 rxr->me, hash, hashtype); 2614 2615 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2616 BUS_DMASYNC_POSTREAD); 2617 2618 if (igb_newbuf(rxr, i, FALSE) != 0) { 2619 IFNET_STAT_INC(ifp, iqdrops, 1); 2620 goto discard; 2621 } 2622 2623 mp->m_len = len; 2624 if (rxr->fmp == NULL) { 2625 mp->m_pkthdr.len = len; 2626 rxr->fmp = mp; 2627 rxr->lmp = mp; 2628 } else { 2629 rxr->lmp->m_next = mp; 2630 rxr->lmp = rxr->lmp->m_next; 2631 rxr->fmp->m_pkthdr.len += len; 2632 } 2633 2634 if (eop) { 2635 m = rxr->fmp; 2636 rxr->fmp = NULL; 2637 rxr->lmp = NULL; 2638 2639 m->m_pkthdr.rcvif = ifp; 2640 IFNET_STAT_INC(ifp, ipackets, 1); 2641 2642 if (ifp->if_capenable & IFCAP_RXCSUM) 2643 igb_rxcsum(staterr, m); 2644 2645 if (staterr & E1000_RXD_STAT_VP) { 2646 m->m_pkthdr.ether_vlantag = vlan; 2647 m->m_flags |= M_VLANTAG; 2648 } 2649 2650 if (ifp->if_capenable & IFCAP_RSS) { 2651 pi = igb_rssinfo(m, &pi0, 2652 hash, hashtype, staterr); 2653 } 2654 #ifdef IGB_RSS_DEBUG 2655 rxr->rx_packets++; 2656 #endif 2657 } 2658 } else { 2659 IFNET_STAT_INC(ifp, ierrors, 1); 2660 discard: 2661 igb_setup_rxdesc(cur, rxbuf); 2662 if (!eop) 2663 rxr->discard = TRUE; 2664 else 2665 rxr->discard = FALSE; 2666 if (rxr->fmp != NULL) { 2667 m_freem(rxr->fmp); 2668 rxr->fmp = NULL; 2669 rxr->lmp = NULL; 2670 } 2671 m = NULL; 2672 } 2673 2674 if (m != NULL) 2675 ifp->if_input(ifp, m, pi, cpuid); 2676 2677 /* Advance our pointers to the next descriptor. 
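		 * (The index wraps at num_rx_desc; RDT is handed back
		 * to the hardware every wreg_nsegs replenished
		 * descriptors via igb_rx_refresh().)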
*/ 2678 if (++i == rxr->num_rx_desc) 2679 i = 0; 2680 2681 if (ncoll >= rxr->wreg_nsegs) { 2682 igb_rx_refresh(rxr, i); 2683 ncoll = 0; 2684 } 2685 2686 cur = &rxr->rx_base[i]; 2687 staterr = le32toh(cur->wb.upper.status_error); 2688 } 2689 rxr->next_to_check = i; 2690 2691 if (ncoll > 0) 2692 igb_rx_refresh(rxr, i); 2693 } 2694 2695 2696 static void 2697 igb_set_vlan(struct igb_softc *sc) 2698 { 2699 struct e1000_hw *hw = &sc->hw; 2700 uint32_t reg; 2701 #if 0 2702 struct ifnet *ifp = sc->arpcom.ac_if; 2703 #endif 2704 2705 if (sc->vf_ifp) { 2706 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2707 return; 2708 } 2709 2710 reg = E1000_READ_REG(hw, E1000_CTRL); 2711 reg |= E1000_CTRL_VME; 2712 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2713 2714 #if 0 2715 /* Enable the Filter Table */ 2716 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2717 reg = E1000_READ_REG(hw, E1000_RCTL); 2718 reg &= ~E1000_RCTL_CFIEN; 2719 reg |= E1000_RCTL_VFE; 2720 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2721 } 2722 #endif 2723 2724 /* Update the frame size */ 2725 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2726 sc->max_frame_size + VLAN_TAG_SIZE); 2727 2728 #if 0 2729 /* Don't bother with table if no vlans */ 2730 if ((adapter->num_vlans == 0) || 2731 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2732 return; 2733 /* 2734 ** A soft reset zero's out the VFTA, so 2735 ** we need to repopulate it now. 2736 */ 2737 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2738 if (adapter->shadow_vfta[i] != 0) { 2739 if (adapter->vf_ifp) 2740 e1000_vfta_set_vf(hw, 2741 adapter->shadow_vfta[i], TRUE); 2742 else 2743 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2744 i, adapter->shadow_vfta[i]); 2745 } 2746 #endif 2747 } 2748 2749 static void 2750 igb_enable_intr(struct igb_softc *sc) 2751 { 2752 int i; 2753 2754 for (i = 0; i < sc->intr_cnt; ++i) 2755 lwkt_serialize_handler_enable(sc->intr_data[i].intr_serialize); 2756 2757 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2758 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 2759 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask); 2760 else 2761 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2762 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2763 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2764 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2765 } else { 2766 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2767 } 2768 E1000_WRITE_FLUSH(&sc->hw); 2769 } 2770 2771 static void 2772 igb_disable_intr(struct igb_softc *sc) 2773 { 2774 int i; 2775 2776 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2777 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2778 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2779 } 2780 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2781 E1000_WRITE_FLUSH(&sc->hw); 2782 2783 for (i = 0; i < sc->intr_cnt; ++i) 2784 lwkt_serialize_handler_disable(sc->intr_data[i].intr_serialize); 2785 } 2786 2787 /* 2788 * Bit of a misnomer, what this really means is 2789 * to enable OS management of the system... 
aka
 * to disable special hardware management features.
 */
static void
igb_get_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~E1000_MANC_ARP_EN;

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5; /* Mng Port 623 */
		manc2h |= 1 << 6; /* Mng Port 664 */
		E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to the hardware management controller
 * if there is one.
 */
static void
igb_rel_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* Re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * Sets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void
igb_get_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * Resets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_rel_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

static boolean_t
igb_is_valid_ether_addr(const uint8_t *addr)
{
	uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return FALSE;
	return TRUE;
}

/*
 * Enable PCI Wake On LAN capability
 */
static void
igb_enable_wol(device_t dev)
{
	uint16_t cap, status;
	uint8_t id;

	/* First find the capabilities pointer */
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);

	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)	/* Something wrong */
		return;

	/*
	 * OK, we have the power capabilities,
	 * so now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
}

static void
igb_update_stats_counters(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_hw_stats *stats;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * The virtual function adapter has only a small,
	 * controlled set of stats; update only those and
	 * return.
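	 * (The VF counters are rolling 32-bit values; UPDATE_VF_REG,
	 * used below, is expected to accumulate deltas against the
	 * last snapshot.)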
	 */
	if (sc->vf_ifp) {
		igb_update_vf_stats_counters(sc);
		return;
	}
	stats = sc->stats;

	if (sc->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += sc->pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first.
*/ 2964 /* Both registers clear on the read of the high dword */ 2965 2966 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 2967 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 2968 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 2969 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 2970 2971 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 2972 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 2973 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 2974 stats->roc += E1000_READ_REG(hw, E1000_ROC); 2975 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 2976 2977 stats->tor += E1000_READ_REG(hw, E1000_TORH); 2978 stats->tot += E1000_READ_REG(hw, E1000_TOTH); 2979 2980 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 2981 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 2982 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 2983 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 2984 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 2985 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 2986 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 2987 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 2988 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 2989 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 2990 2991 /* Interrupt Counts */ 2992 2993 stats->iac += E1000_READ_REG(hw, E1000_IAC); 2994 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 2995 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 2996 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 2997 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 2998 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 2999 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 3000 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 3001 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 3002 3003 /* Host to Card Statistics */ 3004 3005 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 3006 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 3007 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 3008 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 3009 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 3010 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 3011 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 3012 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 3013 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 3014 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 3015 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 3016 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 3017 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 3018 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 3019 3020 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 3021 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 3022 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 3023 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 3024 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 3025 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 3026 3027 IFNET_STAT_SET(ifp, collisions, stats->colc); 3028 3029 /* Rx Errors */ 3030 IFNET_STAT_SET(ifp, ierrors, 3031 stats->rxerrc + stats->crcerrs + stats->algnerrc + 3032 stats->ruc + stats->roc + stats->mpc + stats->cexterr); 3033 3034 /* Tx Errors */ 3035 IFNET_STAT_SET(ifp, oerrors, 3036 stats->ecol + stats->latecol + sc->watchdog_events); 3037 3038 /* Driver specific counters */ 3039 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 3040 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 3041 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 3042 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 3043 
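	/*
	 * PBA splits the packet buffer SRAM: the TX allocation is in
	 * the high 16 bits and the RX allocation in the low 16 bits,
	 * both in KB.
	 */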
sc->packet_buf_alloc_tx = 3044 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); 3045 sc->packet_buf_alloc_rx = 3046 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 3047 } 3048 3049 static void 3050 igb_vf_init_stats(struct igb_softc *sc) 3051 { 3052 struct e1000_hw *hw = &sc->hw; 3053 struct e1000_vf_stats *stats; 3054 3055 stats = sc->stats; 3056 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 3057 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 3058 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 3059 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 3060 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 3061 } 3062 3063 static void 3064 igb_update_vf_stats_counters(struct igb_softc *sc) 3065 { 3066 struct e1000_hw *hw = &sc->hw; 3067 struct e1000_vf_stats *stats; 3068 3069 if (sc->link_speed == 0) 3070 return; 3071 3072 stats = sc->stats; 3073 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 3074 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 3075 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 3076 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 3077 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 3078 } 3079 3080 #ifdef IFPOLL_ENABLE 3081 3082 static void 3083 igb_npoll_status(struct ifnet *ifp) 3084 { 3085 struct igb_softc *sc = ifp->if_softc; 3086 uint32_t reg_icr; 3087 3088 ASSERT_SERIALIZED(&sc->main_serialize); 3089 3090 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3091 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3092 sc->hw.mac.get_link_status = 1; 3093 igb_update_link_status(sc); 3094 } 3095 } 3096 3097 static void 3098 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 3099 { 3100 struct igb_tx_ring *txr = arg; 3101 3102 ASSERT_SERIALIZED(&txr->tx_serialize); 3103 3104 igb_txeof(txr, *(txr->tx_hdr)); 3105 if (!ifsq_is_empty(txr->ifsq)) 3106 ifsq_devstart(txr->ifsq); 3107 } 3108 3109 static void 3110 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3111 { 3112 struct igb_rx_ring *rxr = arg; 3113 3114 ASSERT_SERIALIZED(&rxr->rx_serialize); 3115 3116 igb_rxeof(rxr, cycle); 3117 } 3118 3119 static void 3120 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3121 { 3122 struct igb_softc *sc = ifp->if_softc; 3123 int i, txr_cnt, rxr_cnt; 3124 3125 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3126 3127 if (info) { 3128 int cpu; 3129 3130 info->ifpi_status.status_func = igb_npoll_status; 3131 info->ifpi_status.serializer = &sc->main_serialize; 3132 3133 txr_cnt = igb_get_txring_inuse(sc, TRUE); 3134 for (i = 0; i < txr_cnt; ++i) { 3135 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3136 3137 cpu = if_ringmap_cpumap(sc->tx_rmap, i); 3138 KKASSERT(cpu < netisr_ncpus); 3139 info->ifpi_tx[cpu].poll_func = igb_npoll_tx; 3140 info->ifpi_tx[cpu].arg = txr; 3141 info->ifpi_tx[cpu].serializer = &txr->tx_serialize; 3142 ifsq_set_cpuid(txr->ifsq, cpu); 3143 } 3144 3145 rxr_cnt = igb_get_rxring_inuse(sc, TRUE); 3146 for (i = 0; i < rxr_cnt; ++i) { 3147 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3148 3149 cpu = if_ringmap_cpumap(sc->rx_rmap, i); 3150 KKASSERT(cpu < netisr_ncpus); 3151 info->ifpi_rx[cpu].poll_func = igb_npoll_rx; 3152 info->ifpi_rx[cpu].arg = rxr; 3153 info->ifpi_rx[cpu].serializer = &rxr->rx_serialize; 3154 } 3155 } else { 3156 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3157 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3158 3159 ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid); 3160 } 3161 } 3162 if (ifp->if_flags & IFF_RUNNING) 3163 igb_init(sc); 3164 } 3165 3166 #endif /* 
IFPOLL_ENABLE */ 3167 3168 static void 3169 igb_intr(void *xsc) 3170 { 3171 struct igb_softc *sc = xsc; 3172 struct ifnet *ifp = &sc->arpcom.ac_if; 3173 uint32_t eicr; 3174 3175 ASSERT_SERIALIZED(&sc->main_serialize); 3176 3177 eicr = E1000_READ_REG(&sc->hw, E1000_EICR); 3178 3179 if (eicr == 0) 3180 return; 3181 3182 if (ifp->if_flags & IFF_RUNNING) { 3183 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3184 int i; 3185 3186 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3187 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3188 3189 if (eicr & rxr->rx_intr_mask) { 3190 lwkt_serialize_enter(&rxr->rx_serialize); 3191 igb_rxeof(rxr, -1); 3192 lwkt_serialize_exit(&rxr->rx_serialize); 3193 } 3194 } 3195 3196 if (eicr & txr->tx_intr_mask) { 3197 lwkt_serialize_enter(&txr->tx_serialize); 3198 igb_txeof(txr, *(txr->tx_hdr)); 3199 if (!ifsq_is_empty(txr->ifsq)) 3200 ifsq_devstart(txr->ifsq); 3201 lwkt_serialize_exit(&txr->tx_serialize); 3202 } 3203 } 3204 3205 if (eicr & E1000_EICR_OTHER) { 3206 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3207 3208 /* Link status change */ 3209 if (icr & E1000_ICR_LSC) { 3210 sc->hw.mac.get_link_status = 1; 3211 igb_update_link_status(sc); 3212 } 3213 } 3214 3215 /* 3216 * Reading EICR has the side effect to clear interrupt mask, 3217 * so all interrupts need to be enabled here. 3218 */ 3219 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 3220 } 3221 3222 static void 3223 igb_intr_shared(void *xsc) 3224 { 3225 struct igb_softc *sc = xsc; 3226 struct ifnet *ifp = &sc->arpcom.ac_if; 3227 uint32_t reg_icr; 3228 3229 ASSERT_SERIALIZED(&sc->main_serialize); 3230 3231 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3232 3233 /* Hot eject? */ 3234 if (reg_icr == 0xffffffff) 3235 return; 3236 3237 /* Definitely not our interrupt. */ 3238 if (reg_icr == 0x0) 3239 return; 3240 3241 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) 3242 return; 3243 3244 if (ifp->if_flags & IFF_RUNNING) { 3245 if (reg_icr & 3246 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 3247 int i; 3248 3249 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3250 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3251 3252 lwkt_serialize_enter(&rxr->rx_serialize); 3253 igb_rxeof(rxr, -1); 3254 lwkt_serialize_exit(&rxr->rx_serialize); 3255 } 3256 } 3257 3258 if (reg_icr & E1000_ICR_TXDW) { 3259 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3260 3261 lwkt_serialize_enter(&txr->tx_serialize); 3262 igb_txeof(txr, *(txr->tx_hdr)); 3263 if (!ifsq_is_empty(txr->ifsq)) 3264 ifsq_devstart(txr->ifsq); 3265 lwkt_serialize_exit(&txr->tx_serialize); 3266 } 3267 } 3268 3269 /* Link status change */ 3270 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3271 sc->hw.mac.get_link_status = 1; 3272 igb_update_link_status(sc); 3273 } 3274 3275 if (reg_icr & E1000_ICR_RXO) 3276 sc->rx_overruns++; 3277 } 3278 3279 static int 3280 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp, 3281 int *segs_used, int *idx) 3282 { 3283 bus_dma_segment_t segs[IGB_MAX_SCATTER]; 3284 bus_dmamap_t map; 3285 struct igb_tx_buf *tx_buf, *tx_buf_mapped; 3286 union e1000_adv_tx_desc *txd = NULL; 3287 struct mbuf *m_head = *m_headp; 3288 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0; 3289 int maxsegs, nsegs, i, j, error; 3290 uint32_t hdrlen = 0; 3291 3292 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3293 error = igb_tso_pullup(txr, m_headp); 3294 if (error) 3295 return error; 3296 m_head = *m_headp; 3297 } 3298 3299 /* Set basic descriptor constants */ 3300 cmd_type_len |= E1000_ADVTXD_DTYP_DATA; 3301 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | 
	    E1000_ADVTXD_DCMD_DEXT;
	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	/*
	 * Map the packet for DMA.
	 */
	tx_buf = &txr->tx_buf[txr->next_avail_desc];
	tx_buf_mapped = tx_buf;
	map = tx_buf->map;

	maxsegs = txr->tx_avail - IGB_TX_RESERVED;
	if (maxsegs > IGB_MAX_SCATTER)
		maxsegs = IGB_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			txr->sc->mbuf_defrag_failed++;
		else
			txr->sc->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;

	/*
	 * Set up the TX context descriptor, if any hardware offloading
	 * is needed.  This includes CSUM, VLAN, and TSO.  It will
	 * consume one TX descriptor.
	 *
	 * Unlike these chips' predecessors (em/emx), the TX context
	 * descriptor will _not_ interfere with TX data fetch pipelining.
	 */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		igb_tso_ctx(txr, m_head, &hdrlen);
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
		olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		txr->tx_nsegs++;
		(*segs_used)++;
	} else if (igb_txcsum_ctx(txr, m_head)) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
		if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
			olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
		txr->tx_nsegs++;
		(*segs_used)++;
	}

	*segs_used += nsegs;
	txr->tx_nsegs += nsegs;
	if (txr->tx_nsegs >= txr->intr_nsegs) {
		/*
		 * Report Status (RS) is turned on every intr_nsegs
		 * descriptors (roughly).
		 */
		txr->tx_nsegs = 0;
		cmd_rs = E1000_ADVTXD_DCMD_RS;
	}

	/* Calculate payload length */
	olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
	    << E1000_ADVTXD_PAYLEN_SHIFT);

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as the TX context index here.
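	 * (Cf. igb_txcsum_ctx(), which programs the same queue index
	 * into the context descriptor.)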
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	/* Set up our transmit descriptors */
	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;

		tx_buf = &txr->tx_buf[i];
		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seg_addr = segs[j].ds_addr;
		seg_len = segs[j].ds_len;

		txd->read.buffer_addr = htole64(seg_addr);
		txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
		txd->read.olinfo_status = htole32(olinfo_status);
		if (++i == txr->num_tx_desc)
			i = 0;
		tx_buf->m_head = NULL;
	}

	KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
	txr->next_avail_desc = i;
	txr->tx_avail -= nsegs;

	tx_buf->m_head = m_head;
	tx_buf_mapped->map = tx_buf->map;
	tx_buf->map = map;

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);

	/*
	 * Defer TDT updating, until enough descriptors are set up
	 */
	*idx = i;
#ifdef IGB_TSS_DEBUG
	++txr->tx_packets;
#endif

	return 0;
}

static void
igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct igb_softc *sc = ifp->if_softc;
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		if (txr->tx_avail <= IGB_MAX_SCATTER + IGB_TX_RESERVED) {
			ifsq_set_oactive(ifsq);
			/* Set watchdog on */
			txr->tx_watchdog.wd_timer = 5;
			break;
		}

		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (igb_encap(txr, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so
		 * increasing opackets at TX interrupt time would make
		 * the opackets statistics vastly inaccurate; we do
		 * the opackets increment now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= txr->wreg_nsegs) {
			E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
			idx = -1;
			nsegs = 0;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	if (idx >= 0)
		E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
}

static void
igb_watchdog(struct ifaltq_subque *ifsq)
{
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct igb_softc *sc = ifp->if_softc;
	int i;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If flow control has paused us since we last checked,
	 * it invalidates the watchdog timing, so don't run it.
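	 * (pause_frames is refreshed from the XOFFRXC counter in
	 * igb_update_stats_counters().)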
3492 */ 3493 if (sc->pause_frames) { 3494 sc->pause_frames = 0; 3495 txr->tx_watchdog.wd_timer = 5; 3496 return; 3497 } 3498 3499 if_printf(ifp, "Watchdog timeout -- resetting\n"); 3500 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, 3501 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)), 3502 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me))); 3503 if_printf(ifp, "TX(%d) desc avail = %d, " 3504 "Next TX to Clean = %d\n", 3505 txr->me, txr->tx_avail, txr->next_to_clean); 3506 3507 IFNET_STAT_INC(ifp, oerrors, 1); 3508 sc->watchdog_events++; 3509 3510 igb_init(sc); 3511 for (i = 0; i < sc->tx_ring_inuse; ++i) 3512 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 3513 } 3514 3515 static void 3516 igb_set_eitr(struct igb_softc *sc, int idx, int rate) 3517 { 3518 uint32_t eitr = 0; 3519 3520 if (rate > 0) { 3521 if (sc->hw.mac.type == e1000_82575) { 3522 eitr = 1000000000 / 256 / rate; 3523 /* 3524 * NOTE: 3525 * Document is wrong on the 2 bits left shift 3526 */ 3527 } else { 3528 eitr = 1000000 / rate; 3529 eitr <<= IGB_EITR_INTVL_SHIFT; 3530 } 3531 3532 if (eitr == 0) { 3533 /* Don't disable it */ 3534 eitr = 1 << IGB_EITR_INTVL_SHIFT; 3535 } else if (eitr > IGB_EITR_INTVL_MASK) { 3536 /* Don't allow it to be too large */ 3537 eitr = IGB_EITR_INTVL_MASK; 3538 } 3539 } 3540 if (sc->hw.mac.type == e1000_82575) 3541 eitr |= eitr << 16; 3542 else 3543 eitr |= E1000_EITR_CNT_IGNR; 3544 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr); 3545 } 3546 3547 static void 3548 igb_add_intr_rate_sysctl(struct igb_softc *sc, int use, 3549 const char *name, const char *desc) 3550 { 3551 int i; 3552 3553 for (i = 0; i < sc->intr_cnt; ++i) { 3554 if (sc->intr_data[i].intr_use == use) { 3555 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 3556 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), 3557 OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW, 3558 sc, use, igb_sysctl_intr_rate, "I", desc); 3559 break; 3560 } 3561 } 3562 } 3563 3564 static int 3565 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3566 { 3567 struct igb_softc *sc = (void *)arg1; 3568 int use = arg2; 3569 struct ifnet *ifp = &sc->arpcom.ac_if; 3570 int error, rate, i; 3571 struct igb_intr_data *intr; 3572 3573 rate = 0; 3574 for (i = 0; i < sc->intr_cnt; ++i) { 3575 intr = &sc->intr_data[i]; 3576 if (intr->intr_use == use) { 3577 rate = intr->intr_rate; 3578 break; 3579 } 3580 } 3581 3582 error = sysctl_handle_int(oidp, &rate, 0, req); 3583 if (error || req->newptr == NULL) 3584 return error; 3585 if (rate <= 0) 3586 return EINVAL; 3587 3588 ifnet_serialize_all(ifp); 3589 3590 for (i = 0; i < sc->intr_cnt; ++i) { 3591 intr = &sc->intr_data[i]; 3592 if (intr->intr_use == use && intr->intr_rate != rate) { 3593 intr->intr_rate = rate; 3594 if (ifp->if_flags & IFF_RUNNING) 3595 igb_set_eitr(sc, i, rate); 3596 } 3597 } 3598 3599 ifnet_deserialize_all(ifp); 3600 3601 return error; 3602 } 3603 3604 static int 3605 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3606 { 3607 struct igb_softc *sc = (void *)arg1; 3608 struct ifnet *ifp = &sc->arpcom.ac_if; 3609 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3610 int error, nsegs; 3611 3612 nsegs = txr->intr_nsegs; 3613 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3614 if (error || req->newptr == NULL) 3615 return error; 3616 if (nsegs <= 0) 3617 return EINVAL; 3618 3619 ifnet_serialize_all(ifp); 3620 3621 if (nsegs >= txr->num_tx_desc - IGB_MAX_SCATTER - IGB_TX_RESERVED) { 3622 error = EINVAL; 3623 } else { 3624 int i; 3625 3626 error = 0; 3627 for (i = 0; i < sc->tx_ring_cnt; ++i) 3628 sc->tx_rings[i].intr_nsegs = nsegs; 
3629 } 3630 3631 ifnet_deserialize_all(ifp); 3632 3633 return error; 3634 } 3635 3636 static int 3637 igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3638 { 3639 struct igb_softc *sc = (void *)arg1; 3640 struct ifnet *ifp = &sc->arpcom.ac_if; 3641 int error, nsegs, i; 3642 3643 nsegs = sc->rx_rings[0].wreg_nsegs; 3644 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3645 if (error || req->newptr == NULL) 3646 return error; 3647 3648 ifnet_serialize_all(ifp); 3649 for (i = 0; i < sc->rx_ring_cnt; ++i) 3650 sc->rx_rings[i].wreg_nsegs = nsegs; 3651 ifnet_deserialize_all(ifp); 3652 3653 return 0; 3654 } 3655 3656 static int 3657 igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3658 { 3659 struct igb_softc *sc = (void *)arg1; 3660 struct ifnet *ifp = &sc->arpcom.ac_if; 3661 int error, nsegs, i; 3662 3663 nsegs = sc->tx_rings[0].wreg_nsegs; 3664 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3665 if (error || req->newptr == NULL) 3666 return error; 3667 3668 ifnet_serialize_all(ifp); 3669 for (i = 0; i < sc->tx_ring_cnt; ++i) 3670 sc->tx_rings[i].wreg_nsegs = nsegs; 3671 ifnet_deserialize_all(ifp); 3672 3673 return 0; 3674 } 3675 3676 static void 3677 igb_init_intr(struct igb_softc *sc) 3678 { 3679 int i; 3680 3681 igb_set_intr_mask(sc); 3682 3683 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) 3684 igb_init_unshared_intr(sc); 3685 3686 for (i = 0; i < sc->intr_cnt; ++i) 3687 igb_set_eitr(sc, i, sc->intr_data[i].intr_rate); 3688 } 3689 3690 static void 3691 igb_init_unshared_intr(struct igb_softc *sc) 3692 { 3693 struct e1000_hw *hw = &sc->hw; 3694 const struct igb_rx_ring *rxr; 3695 const struct igb_tx_ring *txr; 3696 uint32_t ivar, index; 3697 int i; 3698 3699 /* 3700 * Enable extended mode 3701 */ 3702 if (sc->hw.mac.type != e1000_82575) { 3703 uint32_t gpie; 3704 int ivar_max; 3705 3706 gpie = E1000_GPIE_NSICR; 3707 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3708 gpie |= E1000_GPIE_MSIX_MODE | 3709 E1000_GPIE_EIAME | 3710 E1000_GPIE_PBA; 3711 } 3712 E1000_WRITE_REG(hw, E1000_GPIE, gpie); 3713 3714 /* 3715 * Clear IVARs 3716 */ 3717 switch (sc->hw.mac.type) { 3718 case e1000_82576: 3719 ivar_max = IGB_MAX_IVAR_82576; 3720 break; 3721 3722 case e1000_82580: 3723 ivar_max = IGB_MAX_IVAR_82580; 3724 break; 3725 3726 case e1000_i350: 3727 ivar_max = IGB_MAX_IVAR_I350; 3728 break; 3729 3730 case e1000_i354: 3731 ivar_max = IGB_MAX_IVAR_I354; 3732 break; 3733 3734 case e1000_vfadapt: 3735 case e1000_vfadapt_i350: 3736 ivar_max = IGB_MAX_IVAR_VF; 3737 break; 3738 3739 case e1000_i210: 3740 ivar_max = IGB_MAX_IVAR_I210; 3741 break; 3742 3743 case e1000_i211: 3744 ivar_max = IGB_MAX_IVAR_I211; 3745 break; 3746 3747 default: 3748 panic("unknown mac type %d\n", sc->hw.mac.type); 3749 } 3750 for (i = 0; i < ivar_max; ++i) 3751 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 3752 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3753 } else { 3754 uint32_t tmp; 3755 3756 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 3757 ("82575 w/ MSI-X")); 3758 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 3759 tmp |= E1000_CTRL_EXT_IRCA; 3760 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 3761 } 3762 3763 /* 3764 * Map TX/RX interrupts to EICR 3765 */ 3766 switch (sc->hw.mac.type) { 3767 case e1000_82580: 3768 case e1000_i350: 3769 case e1000_i354: 3770 case e1000_vfadapt: 3771 case e1000_vfadapt_i350: 3772 case e1000_i210: 3773 case e1000_i211: 3774 /* RX entries */ 3775 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3776 rxr = &sc->rx_rings[i]; 3777 3778 index = i >> 1; 3779 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3780 3781 if (i & 1) 
{ 3782 ivar &= 0xff00ffff; 3783 ivar |= 3784 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 3785 } else { 3786 ivar &= 0xffffff00; 3787 ivar |= 3788 (rxr->rx_intr_vec | E1000_IVAR_VALID); 3789 } 3790 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3791 } 3792 /* TX entries */ 3793 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3794 txr = &sc->tx_rings[i]; 3795 3796 index = i >> 1; 3797 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3798 3799 if (i & 1) { 3800 ivar &= 0x00ffffff; 3801 ivar |= 3802 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 3803 } else { 3804 ivar &= 0xffff00ff; 3805 ivar |= 3806 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 3807 } 3808 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3809 } 3810 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3811 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 3812 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3813 } 3814 break; 3815 3816 case e1000_82576: 3817 /* RX entries */ 3818 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3819 rxr = &sc->rx_rings[i]; 3820 3821 index = i & 0x7; /* Each IVAR has two entries */ 3822 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3823 3824 if (i < 8) { 3825 ivar &= 0xffffff00; 3826 ivar |= 3827 (rxr->rx_intr_vec | E1000_IVAR_VALID); 3828 } else { 3829 ivar &= 0xff00ffff; 3830 ivar |= 3831 (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16; 3832 } 3833 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3834 } 3835 /* TX entries */ 3836 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3837 txr = &sc->tx_rings[i]; 3838 3839 index = i & 0x7; /* Each IVAR has two entries */ 3840 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3841 3842 if (i < 8) { 3843 ivar &= 0xffff00ff; 3844 ivar |= 3845 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; 3846 } else { 3847 ivar &= 0x00ffffff; 3848 ivar |= 3849 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; 3850 } 3851 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3852 } 3853 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3854 ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8; 3855 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3856 } 3857 break; 3858 3859 case e1000_82575: 3860 /* 3861 * Enable necessary interrupt bits. 3862 * 3863 * The name of the register is confusing; in addition to 3864 * configuring the first vector of MSI-X, it also configures 3865 * which bits of EICR could be set by the hardware even when 3866 * MSI or line interrupt is used; it thus controls interrupt 3867 * generation. It MUST be configured explicitly; the default 3868 * value mentioned in the datasheet is wrong: RX queue0 and 3869 * TX queue0 are NOT enabled by default. 
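	 * (Presumably, without this write the queue bits would never
	 * be latched into EICR, so no RX/TX interrupts would be seen
	 * in MSI or line-interrupt mode.)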
3870 */ 3871 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask); 3872 break; 3873 3874 default: 3875 panic("unknown mac type %d\n", sc->hw.mac.type); 3876 } 3877 } 3878 3879 static int 3880 igb_setup_intr(struct igb_softc *sc) 3881 { 3882 int i; 3883 3884 for (i = 0; i < sc->intr_cnt; ++i) { 3885 struct igb_intr_data *intr = &sc->intr_data[i]; 3886 int error; 3887 3888 error = bus_setup_intr_descr(sc->dev, intr->intr_res, 3889 INTR_MPSAFE, intr->intr_func, intr->intr_funcarg, 3890 &intr->intr_hand, intr->intr_serialize, intr->intr_desc); 3891 if (error) { 3892 device_printf(sc->dev, "can't setup %dth intr\n", i); 3893 igb_teardown_intr(sc, i); 3894 return error; 3895 } 3896 } 3897 return 0; 3898 } 3899 3900 static void 3901 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_vec0, int intr_vecmax) 3902 { 3903 if (txr->sc->hw.mac.type == e1000_82575) { 3904 txr->tx_intr_vec = 0; /* unused */ 3905 switch (txr->me) { 3906 case 0: 3907 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0; 3908 break; 3909 case 1: 3910 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1; 3911 break; 3912 case 2: 3913 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2; 3914 break; 3915 case 3: 3916 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3; 3917 break; 3918 default: 3919 panic("unsupported # of TX ring, %d\n", txr->me); 3920 } 3921 } else { 3922 int intr_vec = *intr_vec0; 3923 3924 txr->tx_intr_vec = intr_vec % intr_vecmax; 3925 txr->tx_intr_mask = 1 << txr->tx_intr_vec; 3926 3927 *intr_vec0 = intr_vec + 1; 3928 } 3929 } 3930 3931 static void 3932 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_vec0, int intr_vecmax) 3933 { 3934 if (rxr->sc->hw.mac.type == e1000_82575) { 3935 rxr->rx_intr_vec = 0; /* unused */ 3936 switch (rxr->me) { 3937 case 0: 3938 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0; 3939 break; 3940 case 1: 3941 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1; 3942 break; 3943 case 2: 3944 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2; 3945 break; 3946 case 3: 3947 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3; 3948 break; 3949 default: 3950 panic("unsupported # of RX ring, %d\n", rxr->me); 3951 } 3952 } else { 3953 int intr_vec = *intr_vec0; 3954 3955 rxr->rx_intr_vec = intr_vec % intr_vecmax; 3956 rxr->rx_intr_mask = 1 << rxr->rx_intr_vec; 3957 3958 *intr_vec0 = intr_vec + 1; 3959 } 3960 } 3961 3962 static void 3963 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3964 { 3965 struct igb_softc *sc = ifp->if_softc; 3966 3967 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz); 3968 } 3969 3970 static void 3971 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3972 { 3973 struct igb_softc *sc = ifp->if_softc; 3974 3975 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz); 3976 } 3977 3978 static int 3979 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3980 { 3981 struct igb_softc *sc = ifp->if_softc; 3982 3983 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt, 3984 slz); 3985 } 3986 3987 #ifdef INVARIANTS 3988 3989 static void 3990 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3991 boolean_t serialized) 3992 { 3993 struct igb_softc *sc = ifp->if_softc; 3994 3995 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt, 3996 slz, serialized); 3997 } 3998 3999 #endif /* INVARIANTS */ 4000 4001 static void 4002 igb_set_intr_mask(struct igb_softc *sc) 4003 { 4004 int i; 4005 4006 sc->intr_mask = sc->sts_intr_mask; 4007 for (i = 0; i < sc->rx_ring_inuse; ++i) 4008 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask; 4009 for (i = 0; i < 
sc->tx_ring_inuse; ++i) 4010 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask; 4011 if (bootverbose) { 4012 if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n", 4013 sc->intr_mask); 4014 } 4015 } 4016 4017 static int 4018 igb_alloc_intr(struct igb_softc *sc) 4019 { 4020 struct igb_tx_ring *txr; 4021 struct igb_intr_data *intr; 4022 int i, intr_vec, intr_vecmax; 4023 u_int intr_flags; 4024 4025 igb_alloc_msix(sc); 4026 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 4027 igb_set_ring_inuse(sc, FALSE); 4028 goto done; 4029 } 4030 4031 /* 4032 * Reset some settings changed by igb_alloc_msix(). 4033 */ 4034 if (sc->rx_rmap_intr != NULL) { 4035 if_ringmap_free(sc->rx_rmap_intr); 4036 sc->rx_rmap_intr = NULL; 4037 } 4038 if (sc->tx_rmap_intr != NULL) { 4039 if_ringmap_free(sc->tx_rmap_intr); 4040 sc->tx_rmap_intr = NULL; 4041 } 4042 if (sc->intr_data != NULL) { 4043 kfree(sc->intr_data, M_DEVBUF); 4044 sc->intr_data = NULL; 4045 } 4046 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4047 txr = &sc->tx_rings[i]; 4048 txr->tx_intr_vec = 0; 4049 txr->tx_intr_mask = 0; 4050 txr->tx_intr_cpuid = -1; 4051 } 4052 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4053 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4054 4055 rxr->rx_intr_vec = 0; 4056 rxr->rx_intr_mask = 0; 4057 rxr->rx_txr = NULL; 4058 } 4059 4060 sc->intr_cnt = 1; 4061 sc->intr_data = kmalloc(sizeof(struct igb_intr_data), M_DEVBUF, 4062 M_WAITOK | M_ZERO); 4063 intr = &sc->intr_data[0]; 4064 4065 /* 4066 * Allocate MSI/legacy interrupt resource 4067 */ 4068 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable, 4069 &intr->intr_rid, &intr_flags); 4070 4071 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 4072 int unshared; 4073 4074 unshared = device_getenv_int(sc->dev, "irq.unshared", 0); 4075 if (!unshared) { 4076 sc->flags |= IGB_FLAG_SHARED_INTR; 4077 if (bootverbose) 4078 device_printf(sc->dev, "IRQ shared\n"); 4079 } else { 4080 intr_flags &= ~RF_SHAREABLE; 4081 if (bootverbose) 4082 device_printf(sc->dev, "IRQ unshared\n"); 4083 } 4084 } 4085 4086 intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4087 &intr->intr_rid, intr_flags); 4088 if (intr->intr_res == NULL) { 4089 device_printf(sc->dev, "Unable to allocate bus resource: " 4090 "interrupt\n"); 4091 return ENXIO; 4092 } 4093 4094 intr->intr_serialize = &sc->main_serialize; 4095 intr->intr_cpuid = rman_get_cpuid(intr->intr_res); 4096 intr->intr_func = (sc->flags & IGB_FLAG_SHARED_INTR) ? 
static int
igb_alloc_intr(struct igb_softc *sc)
{
	struct igb_tx_ring *txr;
	struct igb_intr_data *intr;
	int i, intr_vec, intr_vecmax;
	u_int intr_flags;

	igb_alloc_msix(sc);
	if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
		igb_set_ring_inuse(sc, FALSE);
		goto done;
	}

	/*
	 * Reset some settings changed by igb_alloc_msix().
	 */
	if (sc->rx_rmap_intr != NULL) {
		if_ringmap_free(sc->rx_rmap_intr);
		sc->rx_rmap_intr = NULL;
	}
	if (sc->tx_rmap_intr != NULL) {
		if_ringmap_free(sc->tx_rmap_intr);
		sc->tx_rmap_intr = NULL;
	}
	if (sc->intr_data != NULL) {
		kfree(sc->intr_data, M_DEVBUF);
		sc->intr_data = NULL;
	}
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		txr = &sc->tx_rings[i];
		txr->tx_intr_vec = 0;
		txr->tx_intr_mask = 0;
		txr->tx_intr_cpuid = -1;
	}
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		rxr->rx_intr_vec = 0;
		rxr->rx_intr_mask = 0;
		rxr->rx_txr = NULL;
	}

	sc->intr_cnt = 1;
	sc->intr_data = kmalloc(sizeof(struct igb_intr_data), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	intr = &sc->intr_data[0];

	/*
	 * Allocate MSI/legacy interrupt resource
	 */
	sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable,
	    &intr->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= IGB_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(sc->dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(sc->dev, "IRQ unshared\n");
		}
	}

	intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &intr->intr_rid, intr_flags);
	if (intr->intr_res == NULL) {
		device_printf(sc->dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return ENXIO;
	}

	intr->intr_serialize = &sc->main_serialize;
	intr->intr_cpuid = rman_get_cpuid(intr->intr_res);
	intr->intr_func = (sc->flags & IGB_FLAG_SHARED_INTR) ?
	    igb_intr_shared : igb_intr;
	intr->intr_funcarg = sc;
	intr->intr_rate = IGB_INTR_RATE;
	intr->intr_use = IGB_INTR_USE_RXTX;

	sc->tx_rings[0].tx_intr_cpuid = intr->intr_cpuid;

	/*
	 * Setup MSI/legacy interrupt mask
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		intr_vecmax = IGB_MAX_TXRXINT_82575;
		break;

	case e1000_82576:
		intr_vecmax = IGB_MAX_TXRXINT_82576;
		break;

	case e1000_82580:
		intr_vecmax = IGB_MAX_TXRXINT_82580;
		break;

	case e1000_i350:
		intr_vecmax = IGB_MAX_TXRXINT_I350;
		break;

	case e1000_i354:
		intr_vecmax = IGB_MAX_TXRXINT_I354;
		break;

	case e1000_i210:
		intr_vecmax = IGB_MAX_TXRXINT_I210;
		break;

	case e1000_i211:
		intr_vecmax = IGB_MAX_TXRXINT_I211;
		break;

	default:
		intr_vecmax = IGB_MIN_TXRXINT;
		break;
	}
	intr_vec = 0;
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_set_txintr_mask(&sc->tx_rings[i], &intr_vec, intr_vecmax);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_set_rxintr_mask(&sc->rx_rings[i], &intr_vec, intr_vecmax);
	sc->sts_intr_mask = E1000_EICR_OTHER;

	igb_set_ring_inuse(sc, FALSE);
	KKASSERT(sc->rx_ring_inuse <= IGB_MIN_RING_RSS);
	if (sc->rx_ring_inuse == IGB_MIN_RING_RSS) {
		/*
		 * Allocate RX ring map for RSS setup.
		 */
		sc->rx_rmap_intr = if_ringmap_alloc(sc->dev,
		    IGB_MIN_RING_RSS, IGB_MIN_RING_RSS);
		KASSERT(if_ringmap_count(sc->rx_rmap_intr) ==
		    sc->rx_ring_inuse, ("RX ring inuse mismatch"));
	}
done:
	igb_set_intr_mask(sc);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		txr = &sc->tx_rings[i];
		if (txr->tx_intr_cpuid < 0)
			txr->tx_intr_cpuid = 0;
	}
	return 0;
}

static void
igb_free_intr(struct igb_softc *sc)
{
	if (sc->intr_data == NULL)
		return;

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		struct igb_intr_data *intr = &sc->intr_data[0];

		KKASSERT(sc->intr_cnt == 1);
		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (sc->intr_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->dev);

		kfree(sc->intr_data, M_DEVBUF);
	} else {
		igb_free_msix(sc, TRUE);
	}
}

static void
igb_teardown_intr(struct igb_softc *sc, int intr_cnt)
{
	int i;

	if (sc->intr_data == NULL)
		return;

	for (i = 0; i < intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];

		bus_teardown_intr(sc->dev, intr->intr_res, intr->intr_hand);
	}
}
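/*
 * Illustrative sketch (not compiled): the MSI-X layout that
 * igb_alloc_msix() below builds.  Assuming 2 RX rings, 2 TX rings and a
 * device exposing at least 3 MSI-X vectors, the vector table ends up as:
 *
 *	vector 0: rx_rings[0] (+ tx_rings[0] piggybacked on the same cpu)
 *	vector 1: rx_rings[1] (+ tx_rings[1] piggybacked on the same cpu)
 *	vector 2: link status changes (sts_msix_vec)
 *
 * i.e. alloc_cnt = max(RX rings, TX rings) + 1; a TX ring only gets a
 * dedicated vector when no RX ring runs on its cpu.
 */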
static void
igb_alloc_msix(struct igb_softc *sc)
{
	int msix_enable, msix_cnt, msix_ring, alloc_cnt;
	int i, x, error;
	int ring_cnt, ring_cntmax;
	struct igb_intr_data *intr;
	boolean_t setup = FALSE;

	/*
	 * Don't enable MSI-X on 82575, see:
	 * 82575 specification update errata #25
	 */
	if (sc->hw.mac.type == e1000_82575)
		return;

	/* Don't enable MSI-X on VF */
	if (sc->vf_ifp)
		return;

	msix_enable = device_getenv_int(sc->dev, "msix.enable",
	    igb_msix_enable);
	if (!msix_enable)
		return;

	msix_cnt = pci_msix_count(sc->dev);
#ifdef IGB_MSIX_DEBUG
	msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt);
#endif
	if (msix_cnt <= 1) {
		/* A single MSI-X vector gains nothing over MSI. */
		return;
	}
	if (bootverbose)
		device_printf(sc->dev, "MSI-X count %d\n", msix_cnt);
	msix_ring = msix_cnt - 1;	/* -1 for status */

	/*
	 * Configure # of RX/TX rings usable by MSI-X.
	 */
	igb_get_rxring_cnt(sc, &ring_cnt, &ring_cntmax);
	if (ring_cntmax > msix_ring)
		ring_cntmax = msix_ring;
	sc->rx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax);

	igb_get_txring_cnt(sc, &ring_cnt, &ring_cntmax);
	if (ring_cntmax > msix_ring)
		ring_cntmax = msix_ring;
	sc->tx_rmap_intr = if_ringmap_alloc(sc->dev, ring_cnt, ring_cntmax);

	if_ringmap_match(sc->dev, sc->rx_rmap_intr, sc->tx_rmap_intr);
	sc->rx_ring_msix = if_ringmap_count(sc->rx_rmap_intr);
	KASSERT(sc->rx_ring_msix <= sc->rx_ring_cnt,
	    ("total RX ring count %d, MSI-X RX ring count %d",
	     sc->rx_ring_cnt, sc->rx_ring_msix));
	sc->tx_ring_msix = if_ringmap_count(sc->tx_rmap_intr);
	KASSERT(sc->tx_ring_msix <= sc->tx_ring_cnt,
	    ("total TX ring count %d, MSI-X TX ring count %d",
	     sc->tx_ring_cnt, sc->tx_ring_msix));

	/*
	 * Aggregate TX/RX MSI-X
	 */
	ring_cntmax = sc->rx_ring_msix;
	if (ring_cntmax < sc->tx_ring_msix)
		ring_cntmax = sc->tx_ring_msix;
	KASSERT(ring_cntmax <= msix_ring,
	    ("invalid ring count max %d, MSI-X count for rings %d",
	     ring_cntmax, msix_ring));

	alloc_cnt = ring_cntmax + 1;	/* +1 for status */
	if (bootverbose) {
		device_printf(sc->dev, "MSI-X alloc %d, "
		    "RX ring %d, TX ring %d\n", alloc_cnt,
		    sc->rx_ring_msix, sc->tx_ring_msix);
	}

	sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR);
	sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_mem_rid, RF_ACTIVE);
	if (sc->msix_mem_res == NULL) {
		sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR_ALT);
		sc->msix_mem_res = bus_alloc_resource_any(sc->dev,
		    SYS_RES_MEMORY, &sc->msix_mem_rid, RF_ACTIVE);
		if (sc->msix_mem_res == NULL) {
			device_printf(sc->dev, "Unable to map MSI-X table\n");
			return;
		}
	}

	sc->intr_cnt = alloc_cnt;
	sc->intr_data = kmalloc(sizeof(struct igb_intr_data) * sc->intr_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (x = 0; x < sc->intr_cnt; ++x) {
		intr = &sc->intr_data[x];
		intr->intr_rid = -1;
		intr->intr_rate = IGB_INTR_RATE;
	}

	x = 0;
	for (i = 0; i < sc->rx_ring_msix; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		struct igb_tx_ring *txr = NULL;
		int cpuid, j;

		KKASSERT(x < sc->intr_cnt);
		rxr->rx_intr_vec = x;
		rxr->rx_intr_mask = 1 << rxr->rx_intr_vec;

		cpuid = if_ringmap_cpumap(sc->rx_rmap_intr, i);

		/*
		 * Try finding the TX ring that if_ringmap mapped to the
		 * same cpu, so that one vector and one handler
		 * (igb_msix_rxtx) service both directions.  A TX ring
		 * with no RX ring on its cpu keeps tx_intr_cpuid == -1
		 * and is given its own vector by the loop that follows.
		 */
		for (j = 0; j < sc->tx_ring_msix; ++j) {
			if (cpuid ==
			    if_ringmap_cpumap(sc->tx_rmap_intr, j)) {
				txr = &sc->tx_rings[j];
				KKASSERT(txr->tx_intr_cpuid < 0);
				break;
			}
		}
		rxr->rx_txr = txr;

		intr = &sc->intr_data[x++];
		intr->intr_serialize = &rxr->rx_serialize;
		intr->intr_cpuid = cpuid;
		KKASSERT(intr->intr_cpuid < netisr_ncpus);
		intr->intr_funcarg = rxr;
		if (txr != NULL) {
			intr->intr_func = igb_msix_rxtx;
			intr->intr_use = IGB_INTR_USE_RXTX;
			ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
			    "%s rx%dtx%d", device_get_nameunit(sc->dev),
			    i, txr->me);

			txr->tx_intr_vec = rxr->rx_intr_vec;
			txr->tx_intr_mask = rxr->rx_intr_mask;
			txr->tx_intr_cpuid = intr->intr_cpuid;
		} else {
			intr->intr_func = igb_msix_rx;
			intr->intr_rate = IGB_MSIX_RX_RATE;
			intr->intr_use = IGB_INTR_USE_RX;

			ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
			    "%s rx%d", device_get_nameunit(sc->dev), i);
		}
		intr->intr_desc = intr->intr_desc0;
	}

	for (i = 0; i < sc->tx_ring_msix; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		if (txr->tx_intr_cpuid >= 0) {
			/* Piggybacked by RX ring. */
			continue;
		}

		KKASSERT(x < sc->intr_cnt);
		txr->tx_intr_vec = x;
		txr->tx_intr_mask = 1 << txr->tx_intr_vec;

		intr = &sc->intr_data[x++];
		intr->intr_serialize = &txr->tx_serialize;
		intr->intr_func = igb_msix_tx;
		intr->intr_funcarg = txr;
		intr->intr_rate = IGB_MSIX_TX_RATE;
		intr->intr_use = IGB_INTR_USE_TX;

		intr->intr_cpuid = if_ringmap_cpumap(sc->tx_rmap_intr, i);
		KKASSERT(intr->intr_cpuid < netisr_ncpus);
		txr->tx_intr_cpuid = intr->intr_cpuid;

		ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
		    "%s tx%d", device_get_nameunit(sc->dev), i);
		intr->intr_desc = intr->intr_desc0;
	}

	/*
	 * Link status
	 */
	KKASSERT(x < sc->intr_cnt);
	sc->sts_msix_vec = x;
	sc->sts_intr_mask = 1 << sc->sts_msix_vec;

	intr = &sc->intr_data[x++];
	intr->intr_serialize = &sc->main_serialize;
	intr->intr_func = igb_msix_status;
	intr->intr_funcarg = sc;
	intr->intr_cpuid = 0;
	intr->intr_use = IGB_INTR_USE_STATUS;

	ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s sts",
	    device_get_nameunit(sc->dev));
	intr->intr_desc = intr->intr_desc0;

	KKASSERT(x == sc->intr_cnt);

	error = pci_setup_msix(sc->dev);
	if (error) {
		device_printf(sc->dev, "Setup MSI-X failed\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->intr_cnt; ++i) {
		intr = &sc->intr_data[i];

		error = pci_alloc_msix_vector(sc->dev, i, &intr->intr_rid,
		    intr->intr_cpuid);
		if (error) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d on cpu%d\n", i,
			    intr->intr_cpuid);
			goto back;
		}

		intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &intr->intr_rid, RF_ACTIVE);
		if (intr->intr_res == NULL) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d resource\n", i);
			error = ENOMEM;
			goto back;
		}
	}

	pci_enable_msix(sc->dev);
	sc->intr_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		igb_free_msix(sc, setup);
}
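/*
 * Usage note (illustrative): interrupt allocation can be steered from
 * loader.conf through the per-device kenvs consumed by
 * device_getenv_int() above, assuming the usual hw.<device><unit>.<knob>
 * naming, e.g.:
 *
 *	hw.igb0.msix.enable="0"		# fall back to MSI/legacy
 *	hw.igb0.irq.unshared="1"	# demand an unshared legacy IRQ
 *
 * If MSI-X setup fails part way, igb_free_msix() below releases whatever
 * was allocated and igb_alloc_intr() falls back to MSI/legacy.
 */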
static void
igb_free_msix(struct igb_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->intr_cnt > 1);

	for (i = 0; i < sc->intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];

		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (intr->intr_rid >= 0)
			pci_release_msix_vector(sc->dev, intr->intr_rid);
	}
	if (setup)
		pci_teardown_msix(sc->dev);

	sc->intr_cnt = 0;
	kfree(sc->intr_data, M_DEVBUF);
	sc->intr_data = NULL;
}

static void
igb_msix_rx(void *arg)
{
	struct igb_rx_ring *rxr = arg;

	ASSERT_SERIALIZED(&rxr->rx_serialize);
	igb_rxeof(rxr, -1);

	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_msix_tx(void *arg)
{
	struct igb_tx_ring *txr = arg;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	igb_txeof(txr, *(txr->tx_hdr));
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);

	E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
}

static void
igb_msix_status(void *arg)
{
	struct igb_softc *sc = arg;
	uint32_t icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
}

static void
igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
{
	sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling);
	sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling);
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "RX rings %d/%d, TX rings %d/%d\n",
		    sc->rx_ring_inuse, sc->rx_ring_cnt,
		    sc->tx_ring_inuse, sc->tx_ring_cnt);
	}
}

static int
igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWRSS(sc))
		return 1;

	if (polling)
		return sc->rx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING_RSS;
	else
		return sc->rx_ring_msix;
}

static int
igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWTSS(sc))
		return 1;

	if (polling)
		return sc->tx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING;
	else
		return sc->tx_ring_msix;
}

static int
igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) {
		struct ip *ip;

		ip = mtodoff(m, struct ip *, hoff);
		ip->ip_len = 0;
	}

	return 0;
}
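/*
 * Illustrative arithmetic (not compiled): for a common TSO frame the
 * headers that must sit in the first mbuf are
 *
 *	hoff   = 14	(Ethernet header, csum_lhlen)
 *	iphlen = 20	(IPv4 header without options, csum_iphlen)
 *	thoff  = 20	(TCP header without options, csum_thlen)
 *
 * so igb_tso_pullup() above demands m_len >= 54 and m_pullup()s
 * otherwise; igb_tso_ctx() below reuses the same three lengths to build
 * the advanced TX context descriptor and reports hlen = 54 back to the
 * caller.
 */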
static void
igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen)
{
	struct e1000_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	int hoff, ctxd, iphlen, thoff;

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;

	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	if (m->m_flags & M_VLANTAG) {
		uint16_t vlantag;

		vlantag = htole16(m->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= iphlen;

	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;

	mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT);

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;

	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	*hlen = hoff + iphlen + thoff;
}

static void
igb_setup_serialize(struct igb_softc *sc)
{
	int i = 0, j;

	/* Main + RX + TX */
	sc->serialize_cnt = 1 + sc->rx_ring_cnt + sc->tx_ring_cnt;
	sc->serializes =
	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializes
	 *
	 * NOTE: Order is critical
	 */

	KKASSERT(i < sc->serialize_cnt);
	sc->serializes[i++] = &sc->main_serialize;

	for (j = 0; j < sc->rx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
	}

	for (j = 0; j < sc->tx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
	}

	KKASSERT(i == sc->serialize_cnt);
}
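/*
 * Illustrative layout (not compiled): with 2 RX and 2 TX rings the
 * serializer array built above is
 *
 *	serializes[0] = &sc->main_serialize
 *	serializes[1] = &rx_rings[0].rx_serialize
 *	serializes[2] = &rx_rings[1].rx_serialize
 *	serializes[3] = &tx_rings[0].tx_serialize
 *	serializes[4] = &tx_rings[1].tx_serialize
 *
 * ifnet_serialize_array_enter() and friends rely on this fixed
 * main/RX/TX order, which is why the NOTE above calls it critical.
 */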
4685 */ 4686 txr = rxr->rx_txr; 4687 hdr = *(txr->tx_hdr); 4688 if (hdr != txr->next_to_clean) { 4689 lwkt_serialize_enter(&txr->tx_serialize); 4690 igb_txeof(txr, hdr); 4691 if (!ifsq_is_empty(txr->ifsq)) 4692 ifsq_devstart(txr->ifsq); 4693 lwkt_serialize_exit(&txr->tx_serialize); 4694 } 4695 4696 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask); 4697 } 4698 4699 static void 4700 igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling) 4701 { 4702 if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX) 4703 sc->timer_cpuid = 0; /* XXX fixed */ 4704 else 4705 sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res); 4706 } 4707 4708 static void 4709 igb_init_dmac(struct igb_softc *sc, uint32_t pba) 4710 { 4711 struct e1000_hw *hw = &sc->hw; 4712 uint32_t reg; 4713 4714 if (hw->mac.type == e1000_i211) 4715 return; 4716 4717 if (hw->mac.type > e1000_82580) { 4718 uint32_t dmac; 4719 uint16_t hwm; 4720 4721 if (sc->dma_coalesce == 0) { /* Disabling it */ 4722 reg = ~E1000_DMACR_DMAC_EN; 4723 E1000_WRITE_REG(hw, E1000_DMACR, reg); 4724 return; 4725 } else { 4726 if_printf(&sc->arpcom.ac_if, 4727 "DMA Coalescing enabled\n"); 4728 } 4729 4730 /* Set starting threshold */ 4731 E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); 4732 4733 hwm = 64 * pba - sc->max_frame_size / 16; 4734 if (hwm < 64 * (pba - 6)) 4735 hwm = 64 * (pba - 6); 4736 reg = E1000_READ_REG(hw, E1000_FCRTC); 4737 reg &= ~E1000_FCRTC_RTH_COAL_MASK; 4738 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) 4739 & E1000_FCRTC_RTH_COAL_MASK); 4740 E1000_WRITE_REG(hw, E1000_FCRTC, reg); 4741 4742 dmac = pba - sc->max_frame_size / 512; 4743 if (dmac < pba - 10) 4744 dmac = pba - 10; 4745 reg = E1000_READ_REG(hw, E1000_DMACR); 4746 reg &= ~E1000_DMACR_DMACTHR_MASK; 4747 reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT) 4748 & E1000_DMACR_DMACTHR_MASK); 4749 4750 /* transition to L0x or L1 if available..*/ 4751 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); 4752 4753 /* 4754 * Check if status is 2.5Gb backplane connection 4755 * before configuration of watchdog timer, which 4756 * is in msec values in 12.8usec intervals watchdog 4757 * timer = msec values in 32usec intervals for non 4758 * 2.5Gb connection. 4759 */ 4760 if (hw->mac.type == e1000_i354) { 4761 int status = E1000_READ_REG(hw, E1000_STATUS); 4762 4763 if ((status & E1000_STATUS_2P5_SKU) && 4764 !(status & E1000_STATUS_2P5_SKU_OVER)) 4765 reg |= ((sc->dma_coalesce * 5) >> 6); 4766 else 4767 reg |= (sc->dma_coalesce >> 5); 4768 } else { 4769 reg |= (sc->dma_coalesce >> 5); 4770 } 4771 4772 E1000_WRITE_REG(hw, E1000_DMACR, reg); 4773 4774 E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); 4775 4776 /* Set the interval before transition */ 4777 reg = E1000_READ_REG(hw, E1000_DMCTLX); 4778 if (hw->mac.type == e1000_i350) 4779 reg |= IGB_DMCTLX_DCFLUSH_DIS; 4780 /* 4781 * In 2.5Gb connection, TTLX unit is 0.4 usec, which 4782 * is 0x4*2 = 0xA. But delay is still 4 usec. 
4783 */ 4784 if (hw->mac.type == e1000_i354) { 4785 int status = E1000_READ_REG(hw, E1000_STATUS); 4786 4787 if ((status & E1000_STATUS_2P5_SKU) && 4788 !(status & E1000_STATUS_2P5_SKU_OVER)) 4789 reg |= 0xA; 4790 else 4791 reg |= 0x4; 4792 } else { 4793 reg |= 0x4; 4794 } 4795 E1000_WRITE_REG(hw, E1000_DMCTLX, reg); 4796 4797 /* Free space in tx packet buffer to wake from DMA coal */ 4798 E1000_WRITE_REG(hw, E1000_DMCTXTH, 4799 (IGB_TXPBSIZE - (2 * sc->max_frame_size)) >> 6); 4800 4801 /* Make low power state decision controlled by DMA coal */ 4802 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 4803 reg &= ~E1000_PCIEMISC_LX_DECISION; 4804 E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); 4805 } else if (hw->mac.type == e1000_82580) { 4806 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 4807 E1000_WRITE_REG(hw, E1000_PCIEMISC, 4808 reg & ~E1000_PCIEMISC_LX_DECISION); 4809 E1000_WRITE_REG(hw, E1000_DMACR, 0); 4810 } 4811 } 4812
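/*
 * Illustrative arithmetic (not compiled) for the DMA coalescing
 * thresholds programmed above, assuming pba = 34 (KB of packet buffer,
 * as with E1000_PBA_34K) and max_frame_size = 1522:
 *
 *	hwm  = 64 * 34 - 1522 / 16 = 2081	(floor 64 * (34 - 6) = 1792)
 *	dmac = 34 - 1522 / 512     = 32		(floor 34 - 10 = 24)
 *
 * and for the watchdog with dma_coalesce = 1000 usec: 1000 >> 5 = 31
 * units of 32 usec ~= 992 usec, or (1000 * 5) >> 6 = 78 units of
 * 12.8 usec ~= 998 usec on a 2.5Gbps i354 link.
 */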