/*
 * Copyright (c) 2001-2011, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_igb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/igb/if_igb.h>

#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */
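
/*
 * Example (hypothetical call site): with IGB_RSS_DEBUG defined and the
 * rss_debug sysctl set to 1 or higher,
 *	IGB_RSS_DPRINTF(sc, 1, "ring %d, pkt %d\n", i, cnt);
 * prints through if_printf(); without IGB_RSS_DEBUG it expands to
 * ((void)0) and compiles away entirely.
 */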

#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id)	\
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }

static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),
	IGB_DEVICE(I350_VF),
	IGB_DEVICE(I210_COPPER),
	IGB_DEVICE(I210_COPPER_IT),
	IGB_DEVICE(I210_COPPER_OEM1),
	IGB_DEVICE(I210_FIBER),
	IGB_DEVICE(I210_SERDES),
	IGB_DEVICE(I210_SGMII),
	IGB_DEVICE(I211_COPPER),

	/* required last entry */
	IGB_DEVICE_NULL
};
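
/*
 * For reference, a sketch of what one table entry expands to: the ##
 * operator pastes the id onto E1000_DEV_ID_ and the # operator
 * stringizes it onto IGB_NAME, e.g.
 *	IGB_DEVICE(82576) ==
 *	{ IGB_VENDOR_ID, E1000_DEV_ID_82576, "Intel(R) PRO/1000 82576" }
 */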

static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
static int	igb_get_rxring_inuse(const struct igb_softc *, boolean_t);
static int	igb_get_txring_inuse(const struct igb_softc *, boolean_t);
static void	igb_set_timer_cpuid(struct igb_softc *, boolean_t);
#ifdef IFPOLL_ENABLE
static int	igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);
static void	igb_setup_serializer(struct igb_softc *);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifaltq_subque *);
static void	igb_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *);
static void	igb_msix_try_alloc(struct igb_softc *);
static void	igb_msix_rx_conf(struct igb_softc *, int, int *, int);
static void	igb_msix_tx_conf(struct igb_softc *, int, int *, int);
static void	igb_msix_free(struct igb_softc *, boolean_t);
static int	igb_msix_setup(struct igb_softc *);
static void	igb_msix_teardown(struct igb_softc *, int);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);
static void	igb_msix_rxtx(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);

static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	DEVMETHOD_END
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_txr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */
static int	igb_fc_setting = e1000_fc_full;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.txr", &igb_txr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);

/* i350 specific */
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);

static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Skip if the Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}
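
/*
 * Worked example of the status/error decoding above (illustrative):
 * staterr with IPCS set and IPE clear means the hardware checked the
 * IP header checksum and found it good, so both CSUM_IP_CHECKED and
 * CSUM_IP_VALID are set.  TCPCS/UDPCS with TCPE clear marks the L4
 * payload checksum valid, and csum_data is set to 0xffff so the upper
 * layers treat the pseudo-header checksum as already verified.
 */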

static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}
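
/*
 * Note on the IPV4 case above: the hardware appears to report plain
 * UDP/IPv4 packets with the generic IPV4 hash type, so a packet whose
 * L4 checksum was verified (TCPCS set, TCPE clear) is classified as
 * UDP here.  The Toeplitz hash supplied by the NIC is folded through
 * toeplitz_hash() and attached via M_HASH, letting the stack dispatch
 * the packet to the CPU that owns that RSS bucket.
 */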

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, ring_max;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

#ifdef notyet
	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
	    adapter, 0, igb_set_flowcntl, "I", "Flow Control");
#endif

	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		ring_max = IGB_MAX_RING_82575;
		break;

	case e1000_82576:
		ring_max = IGB_MAX_RING_82576;
		break;

	case e1000_82580:
		ring_max = IGB_MAX_RING_82580;
		break;

	case e1000_i350:
		ring_max = IGB_MAX_RING_I350;
		break;

	case e1000_i210:
		ring_max = IGB_MAX_RING_I210;
		break;

	case e1000_i211:
		ring_max = IGB_MAX_RING_I211;
		break;

	default:
		ring_max = IGB_MIN_RING;
		break;
	}

	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max);
#ifdef IGB_RSS_DEBUG
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr_debug", sc->rx_ring_cnt);
#endif
	sc->rx_ring_inuse = sc->rx_ring_cnt;

	sc->tx_ring_cnt = device_getenv_int(dev, "txr", igb_txr);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
#ifdef IGB_TSS_DEBUG
	sc->tx_ring_cnt = device_getenv_int(dev, "txr_debug", sc->tx_ring_cnt);
#endif
	sc->tx_ring_inuse = sc->tx_ring_cnt;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	if (sc->tx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->tx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->tx_npoll_off = offset;
#endif
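
	/*
	 * Worked example of the offset defaults above (hypothetical
	 * numbers): with ncpus2 == 4, rx_ring_cnt == 2 and device unit
	 * 1, offset_def = (2 * 1) % 4 = 2, i.e. igb1's two RX rings
	 * are polled on CPUs 2 and 3 while igb0's would land on CPUs
	 * 0 and 1.  A user-supplied npoll.rxoff must stay below ncpus2
	 * and be a multiple of the ring count.
	 */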

	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/* Setup serializers */
	igb_setup_serializer(sc);

	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		if (sc->hw.phy.media_type == e1000_media_type_copper)
			e1000_set_eee_i350(&sc->hw);
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (sc->hw.mac.type != e1000_i210 && sc->hw.mac.type != e1000_i211 &&
	    e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree; this must come after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif

	/*
	 * Disable interrupts to keep spurious interrupts (line-based
	 * interrupt, MSI or even MSI-X), which have been observed on
	 * several types of LOMs, from being handled.
	 */
	igb_disable_intr(sc);

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		igb_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}
	bus_generic_detach(dev);

	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);
	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;
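
		/*
		 * Example of the MTU arithmetic above: with the 9234
		 * byte hardware frame limit, the largest acceptable
		 * MTU is 9234 - 14 (header) - 4 (CRC) = 9216 bytes;
		 * a 9000 byte jumbo MTU yields a max_frame_size of
		 * 9000 + 14 + 4 = 9018 bytes.
		 */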

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest mac address; user can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configure the number of RX/TX rings actually used */
	igb_set_ring_inuse(sc, polling);
	ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
	}

	igb_set_timer_cpuid(sc, polling);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	if (sc->hw.phy.media_type == e1000_media_type_copper)
		e1000_set_eee_i350(&sc->hw);
}

static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;

	case 100:
		/*
		 * Support for 100Mb SFP - these are Fiber,
		 * but the media type appears as serdes
		 */
		if (sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_100_FX;
		else
			ifmr->ifm_active |= IFM_100_TX;
		break;

	case 1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	igb_init(sc);

	return 0;
}

static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALL THROUGH */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			const char *flowctl;

			/* Get the flow control for display */
			switch (hw->fc.current_mode) {
			case e1000_fc_rx_pause:
				flowctl = "RX";
				break;

			case e1000_fc_tx_pause:
				flowctl = "TX";
				break;

			case e1000_fc_full:
				flowctl = "Full";
				break;

			default:
				flowctl = "None";
				break;
			}

			if_printf(ifp, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex",
			    flowctl);
		}
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
		sc->tx_rings[i].tx_flags &= ~IGB_TXFLAG_ENABLED;
	}

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}

static void
igb_reset(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;

	case e1000_i210:
	case e1000_i211:
		pba = E1000_PBA_34K;
		break;

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
	fc->requested_mode = e1000_fc_default;
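
	/*
	 * Worked example of the watermark math above (assuming the
	 * 82575's 32KB receive PBA and a 1518 byte max frame):
	 * pba << 10 = 32768 bytes, so hwm = min(32768 * 9 / 10,
	 * 32768 - 2 * 1518) = min(29491, 29732) = 29491.  Masked to
	 * 8-byte granularity this gives high_water = 29488 and
	 * low_water = 29480, i.e. XOFF is sent once less than roughly
	 * 3KB of packet buffer remains free.
	 */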

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	if (hw->mac.type > e1000_82580 && hw->mac.type != e1000_i211) {
		uint32_t dmac;
		uint32_t reg;

		if (sc->dma_coalesce == 0) {
			/*
			 * Disabled
			 */
			reg = E1000_READ_REG(hw, E1000_DMACR);
			reg &= ~E1000_DMACR_DMAC_EN;
			E1000_WRITE_REG(hw, E1000_DMACR, reg);
			goto reset_out;
		}

		/* Set starting thresholds */
		E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		hwm = 64 * pba - sc->max_frame_size / 16;
		if (hwm < 64 * (pba - 6))
			hwm = 64 * (pba - 6);
		reg = E1000_READ_REG(hw, E1000_FCRTC);
		reg &= ~E1000_FCRTC_RTH_COAL_MASK;
		reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
		    & E1000_FCRTC_RTH_COAL_MASK);
		E1000_WRITE_REG(hw, E1000_FCRTC, reg);

		dmac = pba - sc->max_frame_size / 512;
		if (dmac < pba - 10)
			dmac = pba - 10;
		reg = E1000_READ_REG(hw, E1000_DMACR);
		reg &= ~E1000_DMACR_DMACTHR_MASK;
		reg = ((dmac << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK);
		/* Transition to L0s or L1 if available */
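		/*
		 * e.g. a hw.igb.dma_coalesce setting of 1000 (usec)
		 * programs 1000 >> 5 = 31 into DMACR's timer field,
		 * which counts in 32 usec units.
		 */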
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
		/* timer = value in sc->dma_coalesce in 32usec intervals */
		reg |= (sc->dma_coalesce >> 5);
		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		reg |= 0x80000004;
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* Free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * sc->max_frame_size)) >> 6);

		/* Make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		reg &= ~E1000_PCIEMISC_LX_DECISION;
		E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
		if_printf(ifp, "DMA Coalescing enabled\n");
	} else if (hw->mac.type == e1000_82580) {
		uint32_t reg = E1000_READ_REG(hw, E1000_PCIEMISC);

		E1000_WRITE_REG(hw, E1000_DMACR, 0);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg & ~E1000_PCIEMISC_LX_DECISION);
	}

reset_out:
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_mask;
	ifq_set_subq_mask(&ifp->if_snd, 0);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog);
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, igb_media_change, igb_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

static void
igb_add_sysctl(struct igb_softc *sc)
{
	char node[32];
	int i;

	sysctl_ctx_init(&sc->sysctl_ctx);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		device_printf(sc->dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree),
		    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");
	} else {
		for (i = 0; i < sc->msix_cnt; ++i) {
			struct igb_msix_data *msix = &sc->msix_data[i];

			ksnprintf(node, sizeof(node), "msix%d_rate", i);
			SYSCTL_ADD_PROC(&sc->sysctl_ctx,
			    SYSCTL_CHILDREN(sc->sysctl_tree),
			    OID_AUTO, node, CTLTYPE_INT | CTLFLAG_RW,
			    msix, 0, igb_sysctl_msix_rate, "I",
			    msix->msix_rate_desc);
		}
	}

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_wreg_nsegs, "I",
	    "# of segments sent before write to hardware register");

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_rx_wreg_nsegs, "I",
	    "# of segments received before write to hardware register");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif

#ifdef IGB_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
	    "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets");
	}
#endif
#ifdef IGB_TSS_DEBUG
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "tx%d_pkt", i);
		SYSCTL_ADD_ULONG(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->tx_rings[i].tx_packets, "TXed packets");
	}
#endif
}

static int
igb_alloc_rings(struct igb_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &sc->parent_tag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate TX descriptor rings and buffers
	 */
	sc->tx_rings = kmalloc_cachealign(
	    sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		txr->sc = sc;
		txr->me = i;
		lwkt_serialize_init(&txr->tx_serialize);

		error = igb_create_tx_ring(txr);
		if (error)
			return error;
	}

	/*
	 * Allocate RX descriptor rings and buffers
	 */
	sc->rx_rings = kmalloc_cachealign(
	    sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		rxr->sc = sc;
		rxr->me = i;
		lwkt_serialize_init(&rxr->rx_serialize);

		error = igb_create_rx_ring(rxr);
		if (error)
			return error;
	}

	return 0;
}

static void
igb_free_rings(struct igb_softc *sc)
{
	int i;

	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			igb_destroy_tx_ring(txr, txr->num_tx_desc);
		}
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
		}
		kfree(sc->rx_rings, M_DEVBUF);
	}
}

static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i, ntxd;

	/*
	 * Validate number of transmit descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 ||
	    ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, ntxd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = ntxd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	if (txr->sc->hw.mac.type == e1000_82575)
		txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0;
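
	/*
	 * Illustration of the watermarks set up below, assuming an
	 * ntxd of 1024: a TX interrupt is requested roughly every
	 * 1024 / 16 = 64 consumed segments, the subqueue is marked
	 * oactive when fewer than 1024 / 8 = 128 descriptors (clipped
	 * to IGB_TX_OACTIVE_MAX) remain, and it is reopened once more
	 * than 1024 / 2 = 512 descriptors are free again.
	 */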

	/*
	 * Initialize various watermarks
	 */
	txr->spare_desc = IGB_TX_SPARE;
	txr->intr_nsegs = txr->num_tx_desc / 16;
	txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS;
	txr->oact_hi_desc = txr->num_tx_desc / 2;
	txr->oact_lo_desc = txr->num_tx_desc / 8;
	if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX)
		txr->oact_lo_desc = IGB_TX_OACTIVE_MAX;
	if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED)
		txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED;

	return 0;
}

static void
igb_free_tx_ring(struct igb_tx_ring *txr)
{
	int i;

	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
	}
}

static void
igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
{
	int i;

	if (txr->txdma.dma_vaddr != NULL) {
		bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
		bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
		    txr->txdma.dma_map);
		bus_dma_tag_destroy(txr->txdma.dma_tag);
		txr->txdma.dma_vaddr = NULL;
	}

	if (txr->tx_hdr != NULL) {
		bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
		bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
		    txr->tx_hdr_dmap);
		bus_dma_tag_destroy(txr->tx_hdr_dtag);
		txr->tx_hdr = NULL;
	}

	if (txr->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		KKASSERT(txbuf->m_head == NULL);
		bus_dmamap_destroy(txr->tx_tag, txbuf->map);
	}
	bus_dma_tag_destroy(txr->tx_tag);

	kfree(txr->tx_buf, M_DEVBUF);
	txr->tx_buf = NULL;
}

static void
igb_init_tx_ring(struct igb_tx_ring *txr)
{
	/* Clear the old descriptor contents */
	bzero(txr->tx_base,
	    sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);

	/* Clear TX head write-back buffer */
	*(txr->tx_hdr) = 0;

	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;
	txr->tx_nsegs = 0;

	/* Set number of descriptors available */
	txr->tx_avail = txr->num_tx_desc;

	/* Enable this TX ring */
	txr->tx_flags |= IGB_TXFLAG_ENABLED;
}

static void
igb_init_tx_unit(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		uint64_t bus_addr = txr->txdma.dma_paddr;
		uint64_t hdr_paddr = txr->tx_hdr_paddr;
		uint32_t txdctl = 0;
		uint32_t dca_txctrl;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    txr->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
		dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);

		/*
		 * Don't set WB_on_EITR:
		 * - 82575 does not have it
		 * - It almost has no effect on 82576, see:
		 *   82576 specification update errata #26
		 * - It causes unnecessary bus traffic
		 */
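		/*
		 * With head write-back enabled below, the MAC DMA-writes
		 * the index of the last completed descriptor into the
		 * cacheline-aligned tx_hdr buffer instead of updating a
		 * status bit in every descriptor; igb_txeof() then reads
		 * that single word to learn how far it may clean.
		 */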
1993 */ 1994 txdctl |= IGB_TX_PTHRESH; 1995 txdctl |= IGB_TX_HTHRESH << 8; 1996 txdctl |= IGB_TX_WTHRESH << 16; 1997 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 1998 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); 1999 } 2000 2001 if (sc->vf_ifp) 2002 return; 2003 2004 e1000_config_collision_dist(hw); 2005 2006 /* Program the Transmit Control Register */ 2007 tctl = E1000_READ_REG(hw, E1000_TCTL); 2008 tctl &= ~E1000_TCTL_CT; 2009 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2010 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); 2011 2012 /* This write will effectively turn on the transmit unit. */ 2013 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 2014 } 2015 2016 static boolean_t 2017 igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp) 2018 { 2019 struct e1000_adv_tx_context_desc *TXD; 2020 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 2021 int ehdrlen, ctxd, ip_hlen = 0; 2022 boolean_t offload = TRUE; 2023 2024 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0) 2025 offload = FALSE; 2026 2027 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 2028 2029 ctxd = txr->next_avail_desc; 2030 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 2031 2032 /* 2033 * In advanced descriptors the vlan tag must 2034 * be placed into the context descriptor, thus 2035 * we need to be here just for that setup. 2036 */ 2037 if (mp->m_flags & M_VLANTAG) { 2038 uint16_t vlantag; 2039 2040 vlantag = htole16(mp->m_pkthdr.ether_vlantag); 2041 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 2042 } else if (!offload) { 2043 return FALSE; 2044 } 2045 2046 ehdrlen = mp->m_pkthdr.csum_lhlen; 2047 KASSERT(ehdrlen > 0, ("invalid ether hlen")); 2048 2049 /* Set the ether header length */ 2050 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; 2051 if (mp->m_pkthdr.csum_flags & CSUM_IP) { 2052 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 2053 ip_hlen = mp->m_pkthdr.csum_iphlen; 2054 KASSERT(ip_hlen > 0, ("invalid ip hlen")); 2055 } 2056 vlan_macip_lens |= ip_hlen; 2057 2058 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 2059 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 2060 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 2061 else if (mp->m_pkthdr.csum_flags & CSUM_UDP) 2062 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; 2063 2064 /* 2065 * 82575 needs the TX context index added; the queue 2066 * index is used as TX context index here. 
2067 */ 2068 if (txr->sc->hw.mac.type == e1000_82575) 2069 mss_l4len_idx = txr->me << 4; 2070 2071 /* Now copy bits into descriptor */ 2072 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 2073 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 2074 TXD->seqnum_seed = htole32(0); 2075 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 2076 2077 /* We've consumed the first desc, adjust counters */ 2078 if (++ctxd == txr->num_tx_desc) 2079 ctxd = 0; 2080 txr->next_avail_desc = ctxd; 2081 --txr->tx_avail; 2082 2083 return offload; 2084 } 2085 2086 static void 2087 igb_txeof(struct igb_tx_ring *txr) 2088 { 2089 struct ifnet *ifp = &txr->sc->arpcom.ac_if; 2090 int first, hdr, avail; 2091 2092 if (txr->tx_avail == txr->num_tx_desc) 2093 return; 2094 2095 first = txr->next_to_clean; 2096 hdr = *(txr->tx_hdr); 2097 2098 if (first == hdr) 2099 return; 2100 2101 avail = txr->tx_avail; 2102 while (first != hdr) { 2103 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; 2104 2105 ++avail; 2106 if (txbuf->m_head) { 2107 bus_dmamap_unload(txr->tx_tag, txbuf->map); 2108 m_freem(txbuf->m_head); 2109 txbuf->m_head = NULL; 2110 IFNET_STAT_INC(ifp, opackets, 1); 2111 } 2112 if (++first == txr->num_tx_desc) 2113 first = 0; 2114 } 2115 txr->next_to_clean = first; 2116 txr->tx_avail = avail; 2117 2118 /* 2119 * If we have a minimum free, clear OACTIVE 2120 * to tell the stack that it is OK to send packets. 2121 */ 2122 if (IGB_IS_NOT_OACTIVE(txr)) { 2123 ifsq_clr_oactive(txr->ifsq); 2124 2125 /* 2126 * We have enough TX descriptors, turn off 2127 * the watchdog. We allow small amount of 2128 * packets (roughly intr_nsegs) pending on 2129 * the transmit ring. 2130 */ 2131 txr->tx_watchdog.wd_timer = 0; 2132 } 2133 } 2134 2135 static int 2136 igb_create_rx_ring(struct igb_rx_ring *rxr) 2137 { 2138 int rsize, i, error, nrxd; 2139 2140 /* 2141 * Validate number of receive descriptors. It must not exceed 2142 * hardware maximum, and must be multiple of IGB_DBA_ALIGN. 
 */
	nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd);
	if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 ||
	    nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, nrxd);
		rxr->num_rx_desc = IGB_DEFAULT_RXD;
	} else {
		rxr->num_rx_desc = nrxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RX descriptor memory\n");
		return ENOMEM;
	}
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_rx_buf) * rxr->num_rx_desc);
	rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES,		/* maxsize */
	    1,			/* nsegments */
	    MCLBYTES,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA maps\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA maps\n");
			igb_destroy_rx_ring(rxr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS;

	return 0;
}

static void
igb_free_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}

	if (rxr->fmp != NULL)
		m_freem(rxr->fmp);
	rxr->fmp = NULL;
	rxr->lmp = NULL;
}

static void
igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rxdma.dma_vaddr != NULL) {
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
		bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
		    rxr->rxdma.dma_map);
		bus_dma_tag_destroy(rxr->rxdma.dma_tag);
		rxr->rxdma.dma_vaddr = NULL;
	}

	if (rxr->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc;
++i) { 2273 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2274 2275 KKASSERT(rxbuf->m_head == NULL); 2276 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map); 2277 } 2278 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap); 2279 bus_dma_tag_destroy(rxr->rx_tag); 2280 2281 kfree(rxr->rx_buf, M_DEVBUF); 2282 rxr->rx_buf = NULL; 2283 } 2284 2285 static void 2286 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf) 2287 { 2288 rxd->read.pkt_addr = htole64(rxbuf->paddr); 2289 rxd->wb.upper.status_error = 0; 2290 } 2291 2292 static int 2293 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait) 2294 { 2295 struct mbuf *m; 2296 bus_dma_segment_t seg; 2297 bus_dmamap_t map; 2298 struct igb_rx_buf *rxbuf; 2299 int error, nseg; 2300 2301 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2302 if (m == NULL) { 2303 if (wait) { 2304 if_printf(&rxr->sc->arpcom.ac_if, 2305 "Unable to allocate RX mbuf\n"); 2306 } 2307 return ENOBUFS; 2308 } 2309 m->m_len = m->m_pkthdr.len = MCLBYTES; 2310 2311 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2312 m_adj(m, ETHER_ALIGN); 2313 2314 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2315 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2316 if (error) { 2317 m_freem(m); 2318 if (wait) { 2319 if_printf(&rxr->sc->arpcom.ac_if, 2320 "Unable to load RX mbuf\n"); 2321 } 2322 return error; 2323 } 2324 2325 rxbuf = &rxr->rx_buf[i]; 2326 if (rxbuf->m_head != NULL) 2327 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2328 2329 map = rxbuf->map; 2330 rxbuf->map = rxr->rx_sparemap; 2331 rxr->rx_sparemap = map; 2332 2333 rxbuf->m_head = m; 2334 rxbuf->paddr = seg.ds_addr; 2335 2336 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2337 return 0; 2338 } 2339 2340 static int 2341 igb_init_rx_ring(struct igb_rx_ring *rxr) 2342 { 2343 int i; 2344 2345 /* Clear the ring contents */ 2346 bzero(rxr->rx_base, 2347 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2348 2349 /* Now replenish the ring mbufs */ 2350 for (i = 0; i < rxr->num_rx_desc; ++i) { 2351 int error; 2352 2353 error = igb_newbuf(rxr, i, TRUE); 2354 if (error) 2355 return error; 2356 } 2357 2358 /* Setup our descriptor indices */ 2359 rxr->next_to_check = 0; 2360 2361 rxr->fmp = NULL; 2362 rxr->lmp = NULL; 2363 rxr->discard = FALSE; 2364 2365 return 0; 2366 } 2367 2368 static void 2369 igb_init_rx_unit(struct igb_softc *sc) 2370 { 2371 struct ifnet *ifp = &sc->arpcom.ac_if; 2372 struct e1000_hw *hw = &sc->hw; 2373 uint32_t rctl, rxcsum, srrctl = 0; 2374 int i; 2375 2376 /* 2377 * Make sure receives are disabled while setting 2378 * up the descriptor ring 2379 */ 2380 rctl = E1000_READ_REG(hw, E1000_RCTL); 2381 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2382 2383 #if 0 2384 /* 2385 ** Set up for header split 2386 */ 2387 if (igb_header_split) { 2388 /* Use a standard mbuf for the header */ 2389 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2390 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2391 } else 2392 #endif 2393 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2394 2395 /* 2396 ** Set up for jumbo frames 2397 */ 2398 if (ifp->if_mtu > ETHERMTU) { 2399 rctl |= E1000_RCTL_LPE; 2400 #if 0 2401 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { 2402 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2403 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; 2404 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { 2405 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2406 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; 2407 } 2408 /* Set maximum packet len */ 2409 psize = 
		    adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
#else
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
#endif
	} else {
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
	}

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		uint64_t bus_addr = rxr->rxdma.dma_paddr;
		uint32_t rxdctl;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		/*
		 * Don't set WTHRESH to a value above 1 on 82576, see:
		 * 82576 specification update errata #26
		 */
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
	rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);

	/*
	 * Receive Checksum Offload for TCP and UDP
	 *
	 * Checksum offloading is also enabled if multiple receive
	 * queues are to be supported, since we need it to figure out
	 * fragments.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD;
	} else {
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD);
	}
	E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);

	if (IGB_ENABLE_HWRSS(sc)) {
		uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
		uint32_t reta_shift;
		int j, r;

		/*
		 * NOTE:
		 * When we reach here, RSS has already been disabled
		 * in igb_stop(), so we can safely configure the RSS
		 * key and redirect table.
		 */

		/*
		 * Configure RSS key
		 */
		toeplitz_get_key(key, sizeof(key));
		for (i = 0; i < IGB_NRSSRK; ++i) {
			uint32_t rssrk;

			rssrk = IGB_RSSRK_VAL(key, i);
			IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);

			E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
		}

		/*
		 * Configure RSS redirect table in the following fashion:
		 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
		 */
		reta_shift = IGB_RETA_SHIFT;
		if (hw->mac.type == e1000_82575)
			reta_shift = IGB_RETA_SHIFT_82575;

		r = 0;
		for (j = 0; j < IGB_NRETA; ++j) {
			uint32_t reta = 0;

			for (i = 0; i < IGB_RETA_SIZE; ++i) {
				uint32_t q;

				q = (r % sc->rx_ring_inuse) << reta_shift;
				reta |= q << (8 * i);
				++r;
			}
			IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
			E1000_WRITE_REG(hw, E1000_RETA(j), reta);
		}

		/*
		 * Enable multiple receive queues.
		 * Enable IPv4 RSS standard hash functions.
2526 * Disable RSS interrupt on 82575 2527 */ 2528 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2529 E1000_MRQC_ENABLE_RSS_4Q | 2530 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2531 E1000_MRQC_RSS_FIELD_IPV4); 2532 } 2533 2534 /* Setup the Receive Control Register */ 2535 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2536 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2537 E1000_RCTL_RDMTS_HALF | 2538 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2539 /* Strip CRC bytes. */ 2540 rctl |= E1000_RCTL_SECRC; 2541 /* Make sure VLAN Filters are off */ 2542 rctl &= ~E1000_RCTL_VFE; 2543 /* Don't store bad packets */ 2544 rctl &= ~E1000_RCTL_SBP; 2545 2546 /* Enable Receives */ 2547 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2548 2549 /* 2550 * Setup the HW Rx Head and Tail Descriptor Pointers 2551 * - needs to be after enable 2552 */ 2553 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2554 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2555 2556 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2557 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1); 2558 } 2559 } 2560 2561 static void 2562 igb_rx_refresh(struct igb_rx_ring *rxr, int i) 2563 { 2564 if (--i < 0) 2565 i = rxr->num_rx_desc - 1; 2566 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2567 } 2568 2569 static void 2570 igb_rxeof(struct igb_rx_ring *rxr, int count) 2571 { 2572 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2573 union e1000_adv_rx_desc *cur; 2574 uint32_t staterr; 2575 int i, ncoll = 0; 2576 2577 i = rxr->next_to_check; 2578 cur = &rxr->rx_base[i]; 2579 staterr = le32toh(cur->wb.upper.status_error); 2580 2581 if ((staterr & E1000_RXD_STAT_DD) == 0) 2582 return; 2583 2584 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2585 struct pktinfo *pi = NULL, pi0; 2586 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2587 struct mbuf *m = NULL; 2588 boolean_t eop; 2589 2590 eop = (staterr & E1000_RXD_STAT_EOP) ? 
TRUE : FALSE; 2591 if (eop) 2592 --count; 2593 2594 ++ncoll; 2595 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2596 !rxr->discard) { 2597 struct mbuf *mp = rxbuf->m_head; 2598 uint32_t hash, hashtype; 2599 uint16_t vlan; 2600 int len; 2601 2602 len = le16toh(cur->wb.upper.length); 2603 if (rxr->sc->hw.mac.type == e1000_i350 && 2604 (staterr & E1000_RXDEXT_STATERR_LB)) 2605 vlan = be16toh(cur->wb.upper.vlan); 2606 else 2607 vlan = le16toh(cur->wb.upper.vlan); 2608 2609 hash = le32toh(cur->wb.lower.hi_dword.rss); 2610 hashtype = le32toh(cur->wb.lower.lo_dword.data) & 2611 E1000_RXDADV_RSSTYPE_MASK; 2612 2613 IGB_RSS_DPRINTF(rxr->sc, 10, 2614 "ring%d, hash 0x%08x, hashtype %u\n", 2615 rxr->me, hash, hashtype); 2616 2617 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2618 BUS_DMASYNC_POSTREAD); 2619 2620 if (igb_newbuf(rxr, i, FALSE) != 0) { 2621 IFNET_STAT_INC(ifp, iqdrops, 1); 2622 goto discard; 2623 } 2624 2625 mp->m_len = len; 2626 if (rxr->fmp == NULL) { 2627 mp->m_pkthdr.len = len; 2628 rxr->fmp = mp; 2629 rxr->lmp = mp; 2630 } else { 2631 rxr->lmp->m_next = mp; 2632 rxr->lmp = rxr->lmp->m_next; 2633 rxr->fmp->m_pkthdr.len += len; 2634 } 2635 2636 if (eop) { 2637 m = rxr->fmp; 2638 rxr->fmp = NULL; 2639 rxr->lmp = NULL; 2640 2641 m->m_pkthdr.rcvif = ifp; 2642 IFNET_STAT_INC(ifp, ipackets, 1); 2643 2644 if (ifp->if_capenable & IFCAP_RXCSUM) 2645 igb_rxcsum(staterr, m); 2646 2647 if (staterr & E1000_RXD_STAT_VP) { 2648 m->m_pkthdr.ether_vlantag = vlan; 2649 m->m_flags |= M_VLANTAG; 2650 } 2651 2652 if (ifp->if_capenable & IFCAP_RSS) { 2653 pi = igb_rssinfo(m, &pi0, 2654 hash, hashtype, staterr); 2655 } 2656 #ifdef IGB_RSS_DEBUG 2657 rxr->rx_packets++; 2658 #endif 2659 } 2660 } else { 2661 IFNET_STAT_INC(ifp, ierrors, 1); 2662 discard: 2663 igb_setup_rxdesc(cur, rxbuf); 2664 if (!eop) 2665 rxr->discard = TRUE; 2666 else 2667 rxr->discard = FALSE; 2668 if (rxr->fmp != NULL) { 2669 m_freem(rxr->fmp); 2670 rxr->fmp = NULL; 2671 rxr->lmp = NULL; 2672 } 2673 m = NULL; 2674 } 2675 2676 if (m != NULL) 2677 ether_input_pkt(ifp, m, pi); 2678 2679 /* Advance our pointers to the next descriptor. */ 2680 if (++i == rxr->num_rx_desc) 2681 i = 0; 2682 2683 if (ncoll >= rxr->wreg_nsegs) { 2684 igb_rx_refresh(rxr, i); 2685 ncoll = 0; 2686 } 2687 2688 cur = &rxr->rx_base[i]; 2689 staterr = le32toh(cur->wb.upper.status_error); 2690 } 2691 rxr->next_to_check = i; 2692 2693 if (ncoll > 0) 2694 igb_rx_refresh(rxr, i); 2695 } 2696 2697 2698 static void 2699 igb_set_vlan(struct igb_softc *sc) 2700 { 2701 struct e1000_hw *hw = &sc->hw; 2702 uint32_t reg; 2703 #if 0 2704 struct ifnet *ifp = sc->arpcom.ac_if; 2705 #endif 2706 2707 if (sc->vf_ifp) { 2708 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2709 return; 2710 } 2711 2712 reg = E1000_READ_REG(hw, E1000_CTRL); 2713 reg |= E1000_CTRL_VME; 2714 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2715 2716 #if 0 2717 /* Enable the Filter Table */ 2718 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2719 reg = E1000_READ_REG(hw, E1000_RCTL); 2720 reg &= ~E1000_RCTL_CFIEN; 2721 reg |= E1000_RCTL_VFE; 2722 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2723 } 2724 #endif 2725 2726 /* Update the frame size */ 2727 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2728 sc->max_frame_size + VLAN_TAG_SIZE); 2729 2730 #if 0 2731 /* Don't bother with table if no vlans */ 2732 if ((adapter->num_vlans == 0) || 2733 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2734 return; 2735 /* 2736 ** A soft reset zero's out the VFTA, so 2737 ** we need to repopulate it now. 
2738 */ 2739 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2740 if (adapter->shadow_vfta[i] != 0) { 2741 if (adapter->vf_ifp) 2742 e1000_vfta_set_vf(hw, 2743 adapter->shadow_vfta[i], TRUE); 2744 else 2745 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2746 i, adapter->shadow_vfta[i]); 2747 } 2748 #endif 2749 } 2750 2751 static void 2752 igb_enable_intr(struct igb_softc *sc) 2753 { 2754 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 2755 lwkt_serialize_handler_enable(&sc->main_serialize); 2756 } else { 2757 int i; 2758 2759 for (i = 0; i < sc->msix_cnt; ++i) { 2760 lwkt_serialize_handler_enable( 2761 sc->msix_data[i].msix_serialize); 2762 } 2763 } 2764 2765 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2766 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 2767 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask); 2768 else 2769 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2770 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2771 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2772 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2773 } else { 2774 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2775 } 2776 E1000_WRITE_FLUSH(&sc->hw); 2777 } 2778 2779 static void 2780 igb_disable_intr(struct igb_softc *sc) 2781 { 2782 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2783 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2784 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2785 } 2786 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2787 E1000_WRITE_FLUSH(&sc->hw); 2788 2789 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 2790 lwkt_serialize_handler_disable(&sc->main_serialize); 2791 } else { 2792 int i; 2793 2794 for (i = 0; i < sc->msix_cnt; ++i) { 2795 lwkt_serialize_handler_disable( 2796 sc->msix_data[i].msix_serialize); 2797 } 2798 } 2799 } 2800 2801 /* 2802 * Bit of a misnomer, what this really means is 2803 * to enable OS management of the system... aka 2804 * to disable special hardware management features 2805 */ 2806 static void 2807 igb_get_mgmt(struct igb_softc *sc) 2808 { 2809 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2810 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 2811 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2812 2813 /* disable hardware interception of ARP */ 2814 manc &= ~E1000_MANC_ARP_EN; 2815 2816 /* enable receiving management packets to the host */ 2817 manc |= E1000_MANC_EN_MNG2HOST; 2818 manc2h |= 1 << 5; /* Mng Port 623 */ 2819 manc2h |= 1 << 6; /* Mng Port 664 */ 2820 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 2821 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2822 } 2823 } 2824 2825 /* 2826 * Give control back to hardware management controller 2827 * if there is one. 2828 */ 2829 static void 2830 igb_rel_mgmt(struct igb_softc *sc) 2831 { 2832 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2833 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2834 2835 /* Re-enable hardware interception of ARP */ 2836 manc |= E1000_MANC_ARP_EN; 2837 manc &= ~E1000_MANC_EN_MNG2HOST; 2838 2839 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2840 } 2841 } 2842 2843 /* 2844 * Sets CTRL_EXT:DRV_LOAD bit. 2845 * 2846 * For ASF and Pass Through versions of f/w this means that 2847 * the driver is loaded. 2848 */ 2849 static void 2850 igb_get_hw_control(struct igb_softc *sc) 2851 { 2852 uint32_t ctrl_ext; 2853 2854 if (sc->vf_ifp) 2855 return; 2856 2857 /* Let firmware know the driver has taken over */ 2858 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2859 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2860 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2861 } 2862 2863 /* 2864 * Resets CTRL_EXT:DRV_LOAD bit. 
 *
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_rel_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

static boolean_t
igb_is_valid_ether_addr(const uint8_t *addr)
{
	uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return FALSE;
	return TRUE;
}

/*
 * Enable PCI Wake On LAN capability
 */
static void
igb_enable_wol(device_t dev)
{
	uint16_t cap, status;
	uint8_t id;

	/* First find the capabilities pointer */
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);

	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)	/* Something wrong */
		return;

	/*
	 * OK, we have the power capabilities,
	 * so now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
}

static void
igb_update_stats_counters(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_hw_stats *stats;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * The virtual function adapter has only a
	 * small controlled set of stats, do only
	 * those and return.
	 */
	if (sc->vf_ifp) {
		igb_update_vf_stats_counters(sc);
		return;
	}
	stats = sc->stats;

	if (sc->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
2961 */ 2962 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 2963 stats->xoffrxc += sc->pause_frames; 2964 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 2965 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 2966 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 2967 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 2968 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 2969 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 2970 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 2971 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 2972 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 2973 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 2974 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 2975 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 2976 2977 /* For the 64-bit byte counters the low dword must be read first. */ 2978 /* Both registers clear on the read of the high dword */ 2979 2980 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 2981 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 2982 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 2983 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 2984 2985 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 2986 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 2987 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 2988 stats->roc += E1000_READ_REG(hw, E1000_ROC); 2989 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 2990 2991 stats->tor += E1000_READ_REG(hw, E1000_TORH); 2992 stats->tot += E1000_READ_REG(hw, E1000_TOTH); 2993 2994 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 2995 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 2996 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 2997 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 2998 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 2999 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 3000 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 3001 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 3002 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 3003 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 3004 3005 /* Interrupt Counts */ 3006 3007 stats->iac += E1000_READ_REG(hw, E1000_IAC); 3008 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 3009 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 3010 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 3011 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 3012 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 3013 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 3014 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 3015 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 3016 3017 /* Host to Card Statistics */ 3018 3019 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 3020 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 3021 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 3022 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 3023 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 3024 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 3025 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 3026 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 3027 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 3028 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 3029 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 3030 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 3031 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 3032 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 3033 3034 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 3035 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 3036 
stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 3037 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 3038 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 3039 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 3040 3041 IFNET_STAT_SET(ifp, collisions, stats->colc); 3042 3043 /* Rx Errors */ 3044 IFNET_STAT_SET(ifp, ierrors, 3045 stats->rxerrc + stats->crcerrs + stats->algnerrc + 3046 stats->ruc + stats->roc + stats->mpc + stats->cexterr); 3047 3048 /* Tx Errors */ 3049 IFNET_STAT_SET(ifp, oerrors, 3050 stats->ecol + stats->latecol + sc->watchdog_events); 3051 3052 /* Driver specific counters */ 3053 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 3054 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 3055 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 3056 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 3057 sc->packet_buf_alloc_tx = 3058 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); 3059 sc->packet_buf_alloc_rx = 3060 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 3061 } 3062 3063 static void 3064 igb_vf_init_stats(struct igb_softc *sc) 3065 { 3066 struct e1000_hw *hw = &sc->hw; 3067 struct e1000_vf_stats *stats; 3068 3069 stats = sc->stats; 3070 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 3071 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 3072 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 3073 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 3074 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 3075 } 3076 3077 static void 3078 igb_update_vf_stats_counters(struct igb_softc *sc) 3079 { 3080 struct e1000_hw *hw = &sc->hw; 3081 struct e1000_vf_stats *stats; 3082 3083 if (sc->link_speed == 0) 3084 return; 3085 3086 stats = sc->stats; 3087 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 3088 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 3089 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 3090 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 3091 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 3092 } 3093 3094 #ifdef IFPOLL_ENABLE 3095 3096 static void 3097 igb_npoll_status(struct ifnet *ifp) 3098 { 3099 struct igb_softc *sc = ifp->if_softc; 3100 uint32_t reg_icr; 3101 3102 ASSERT_SERIALIZED(&sc->main_serialize); 3103 3104 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3105 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3106 sc->hw.mac.get_link_status = 1; 3107 igb_update_link_status(sc); 3108 } 3109 } 3110 3111 static void 3112 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 3113 { 3114 struct igb_tx_ring *txr = arg; 3115 3116 ASSERT_SERIALIZED(&txr->tx_serialize); 3117 3118 igb_txeof(txr); 3119 if (!ifsq_is_empty(txr->ifsq)) 3120 ifsq_devstart(txr->ifsq); 3121 } 3122 3123 static void 3124 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3125 { 3126 struct igb_rx_ring *rxr = arg; 3127 3128 ASSERT_SERIALIZED(&rxr->rx_serialize); 3129 3130 igb_rxeof(rxr, cycle); 3131 } 3132 3133 static void 3134 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3135 { 3136 struct igb_softc *sc = ifp->if_softc; 3137 int i, txr_cnt, rxr_cnt; 3138 3139 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3140 3141 if (info) { 3142 int off; 3143 3144 info->ifpi_status.status_func = igb_npoll_status; 3145 info->ifpi_status.serializer = &sc->main_serialize; 3146 3147 txr_cnt = igb_get_txring_inuse(sc, TRUE); 3148 off = sc->tx_npoll_off; 3149 for (i = 0; i < txr_cnt; ++i) { 3150 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3151 int idx = i + off; 3152 3153 KKASSERT(idx < ncpus2); 3154 
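			/*
			 * Hook ring i into polling slot (i + off):
			 * this TX ring's poll handler then runs on
			 * that CPU, and the KKASSERT above bounds-
			 * checks the slot against ncpus2.
			 */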
info->ifpi_tx[idx].poll_func = igb_npoll_tx; 3155 info->ifpi_tx[idx].arg = txr; 3156 info->ifpi_tx[idx].serializer = &txr->tx_serialize; 3157 ifsq_set_cpuid(txr->ifsq, idx); 3158 } 3159 3160 rxr_cnt = igb_get_rxring_inuse(sc, TRUE); 3161 off = sc->rx_npoll_off; 3162 for (i = 0; i < rxr_cnt; ++i) { 3163 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3164 int idx = i + off; 3165 3166 KKASSERT(idx < ncpus2); 3167 info->ifpi_rx[idx].poll_func = igb_npoll_rx; 3168 info->ifpi_rx[idx].arg = rxr; 3169 info->ifpi_rx[idx].serializer = &rxr->rx_serialize; 3170 } 3171 3172 if (ifp->if_flags & IFF_RUNNING) { 3173 if (rxr_cnt == sc->rx_ring_inuse && 3174 txr_cnt == sc->tx_ring_inuse) { 3175 igb_set_timer_cpuid(sc, TRUE); 3176 igb_disable_intr(sc); 3177 } else { 3178 igb_init(sc); 3179 } 3180 } 3181 } else { 3182 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3183 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3184 3185 ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid); 3186 } 3187 3188 if (ifp->if_flags & IFF_RUNNING) { 3189 txr_cnt = igb_get_txring_inuse(sc, FALSE); 3190 rxr_cnt = igb_get_rxring_inuse(sc, FALSE); 3191 3192 if (rxr_cnt == sc->rx_ring_inuse && 3193 txr_cnt == sc->tx_ring_inuse) { 3194 igb_set_timer_cpuid(sc, FALSE); 3195 igb_enable_intr(sc); 3196 } else { 3197 igb_init(sc); 3198 } 3199 } 3200 } 3201 } 3202 3203 #endif /* IFPOLL_ENABLE */ 3204 3205 static void 3206 igb_intr(void *xsc) 3207 { 3208 struct igb_softc *sc = xsc; 3209 struct ifnet *ifp = &sc->arpcom.ac_if; 3210 uint32_t eicr; 3211 3212 ASSERT_SERIALIZED(&sc->main_serialize); 3213 3214 eicr = E1000_READ_REG(&sc->hw, E1000_EICR); 3215 3216 if (eicr == 0) 3217 return; 3218 3219 if (ifp->if_flags & IFF_RUNNING) { 3220 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3221 int i; 3222 3223 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3224 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3225 3226 if (eicr & rxr->rx_intr_mask) { 3227 lwkt_serialize_enter(&rxr->rx_serialize); 3228 igb_rxeof(rxr, -1); 3229 lwkt_serialize_exit(&rxr->rx_serialize); 3230 } 3231 } 3232 3233 if (eicr & txr->tx_intr_mask) { 3234 lwkt_serialize_enter(&txr->tx_serialize); 3235 igb_txeof(txr); 3236 if (!ifsq_is_empty(txr->ifsq)) 3237 ifsq_devstart(txr->ifsq); 3238 lwkt_serialize_exit(&txr->tx_serialize); 3239 } 3240 } 3241 3242 if (eicr & E1000_EICR_OTHER) { 3243 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3244 3245 /* Link status change */ 3246 if (icr & E1000_ICR_LSC) { 3247 sc->hw.mac.get_link_status = 1; 3248 igb_update_link_status(sc); 3249 } 3250 } 3251 3252 /* 3253 * Reading EICR has the side effect to clear interrupt mask, 3254 * so all interrupts need to be enabled here. 3255 */ 3256 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 3257 } 3258 3259 static void 3260 igb_intr_shared(void *xsc) 3261 { 3262 struct igb_softc *sc = xsc; 3263 struct ifnet *ifp = &sc->arpcom.ac_if; 3264 uint32_t reg_icr; 3265 3266 ASSERT_SERIALIZED(&sc->main_serialize); 3267 3268 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3269 3270 /* Hot eject? */ 3271 if (reg_icr == 0xffffffff) 3272 return; 3273 3274 /* Definitely not our interrupt. 
 */
	if (reg_icr == 0x0)
		return;

	if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return;

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_inuse; ++i) {
				struct igb_rx_ring *rxr = &sc->rx_rings[i];

				lwkt_serialize_enter(&rxr->rx_serialize);
				igb_rxeof(rxr, -1);
				lwkt_serialize_exit(&rxr->rx_serialize);
			}
		}

		if (reg_icr & E1000_ICR_TXDW) {
			struct igb_tx_ring *txr = &sc->tx_rings[0];

			lwkt_serialize_enter(&txr->tx_serialize);
			igb_txeof(txr);
			if (!ifsq_is_empty(txr->ifsq))
				ifsq_devstart(txr->ifsq);
			lwkt_serialize_exit(&txr->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;
}

static int
igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[IGB_MAX_SCATTER];
	bus_dmamap_t map;
	struct igb_tx_buf *tx_buf, *tx_buf_mapped;
	union e1000_adv_tx_desc *txd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
	int maxsegs, nsegs, i, j, error;
	uint32_t hdrlen = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = igb_tso_pullup(txr, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	/* Set basic descriptor constants */
	cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
	cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	/*
	 * Map the packet for DMA.
	 */
	tx_buf = &txr->tx_buf[txr->next_avail_desc];
	tx_buf_mapped = tx_buf;
	map = tx_buf->map;

	maxsegs = txr->tx_avail - IGB_TX_RESERVED;
	KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n"));
	if (maxsegs > IGB_MAX_SCATTER)
		maxsegs = IGB_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			txr->sc->mbuf_defrag_failed++;
		else
			txr->sc->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;

	/*
	 * Set up the TX context descriptor, if any hardware offloading is
	 * needed.  This includes CSUM, VLAN, and TSO.  It will consume one
	 * TX descriptor.
	 *
	 * Unlike these chips' predecessors (em/emx), the TX context
	 * descriptor will _not_ interfere with TX data fetch pipelining.
 */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		igb_tso_ctx(txr, m_head, &hdrlen);
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
		olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		txr->tx_nsegs++;
		(*segs_used)++;
	} else if (igb_txcsum_ctx(txr, m_head)) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
		if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
			olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
		txr->tx_nsegs++;
		(*segs_used)++;
	}

	*segs_used += nsegs;
	txr->tx_nsegs += nsegs;
	if (txr->tx_nsegs >= txr->intr_nsegs) {
		/*
		 * Report Status (RS) is turned on every intr_nsegs
		 * descriptors (roughly).
		 */
		txr->tx_nsegs = 0;
		cmd_rs = E1000_ADVTXD_DCMD_RS;
	}

	/* Calculate payload length */
	olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
	    << E1000_ADVTXD_PAYLEN_SHIFT);

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	/* Set up our transmit descriptors */
	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;

		tx_buf = &txr->tx_buf[i];
		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seg_addr = segs[j].ds_addr;
		seg_len = segs[j].ds_len;

		txd->read.buffer_addr = htole64(seg_addr);
		txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
		txd->read.olinfo_status = htole32(olinfo_status);
		if (++i == txr->num_tx_desc)
			i = 0;
		tx_buf->m_head = NULL;
	}

	KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
	txr->next_avail_desc = i;
	txr->tx_avail -= nsegs;

	tx_buf->m_head = m_head;
	tx_buf_mapped->map = tx_buf->map;
	tx_buf->map = map;

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);

	/*
	 * Defer TDT updating until enough descriptors are set up
	 */
	*idx = i;
#ifdef IGB_TSS_DEBUG
	++txr->tx_packets;
#endif

	return 0;
}

static void
igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct igb_softc *sc = ifp->if_softc;
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	if (!IGB_IS_NOT_OACTIVE(txr))
		igb_txeof(txr);

	while (!ifsq_is_empty(ifsq)) {
		if (IGB_IS_OACTIVE(txr)) {
			ifsq_set_oactive(ifsq);
			/* Set watchdog on */
			txr->tx_watchdog.wd_timer = 5;
			break;
		}

		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (igb_encap(txr, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			continue;
		}

		if (nsegs >= txr->wreg_nsegs) {
			E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
			idx = -1;
			nsegs = 0;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	if (idx >= 0)
		E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
}

static void
igb_watchdog(struct ifaltq_subque *ifsq)
{
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct igb_softc *sc = ifp->if_softc;
	int i;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If flow control has paused us since last checking,
	 * it invalidates the watchdog timing, so don't run it.
	 */
	if (sc->pause_frames) {
		sc->pause_frames = 0;
		txr->tx_watchdog.wd_timer = 5;
		return;
	}

	if_printf(ifp, "Watchdog timeout -- resetting\n");
	if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
	    E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
	if_printf(ifp, "TX(%d) desc avail = %d, "
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);

	IFNET_STAT_INC(ifp, oerrors, 1);
	sc->watchdog_events++;

	igb_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);
}

static void
igb_set_eitr(struct igb_softc *sc, int idx, int rate)
{
	uint32_t eitr = 0;

	if (rate > 0) {
		if (sc->hw.mac.type == e1000_82575) {
			eitr = 1000000000 / 256 / rate;
			/*
			 * NOTE:
			 * Document is wrong on the 2 bits left shift
			 */
		} else {
			eitr = 1000000 / rate;
			eitr <<= IGB_EITR_INTVL_SHIFT;
		}

		if (eitr == 0) {
			/* Don't disable it */
			eitr = 1 << IGB_EITR_INTVL_SHIFT;
		} else if (eitr > IGB_EITR_INTVL_MASK) {
			/* Don't allow it to be too large */
			eitr = IGB_EITR_INTVL_MASK;
		}
	}
	if (sc->hw.mac.type == e1000_82575)
		eitr |= eitr << 16;
	else
		eitr |= E1000_EITR_CNT_IGNR;
	E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr);
}

static int
igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, intr_rate;

	intr_rate = sc->intr_rate;
	error = sysctl_handle_int(oidp, &intr_rate, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (intr_rate < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	sc->intr_rate = intr_rate;
	if (ifp->if_flags & IFF_RUNNING)
		igb_set_eitr(sc, 0, sc->intr_rate);

	if (bootverbose)
		if_printf(ifp, "interrupt rate set to %d/sec\n", sc->intr_rate);

	ifnet_deserialize_all(ifp);

	return 0;
}

static int
igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS)
{
	struct igb_msix_data *msix = (void *)arg1;
	struct igb_softc *sc = msix->msix_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, msix_rate;

	msix_rate = msix->msix_rate;
	error = sysctl_handle_int(oidp, &msix_rate, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (msix_rate < 0)
		return EINVAL;

	lwkt_serialize_enter(msix->msix_serialize);

	msix->msix_rate = msix_rate;
	if (ifp->if_flags & IFF_RUNNING)
		igb_set_eitr(sc, msix->msix_vector, msix->msix_rate);

	if (bootverbose) {
		if_printf(ifp, "%s set to %d/sec\n", msix->msix_rate_desc,
		    msix->msix_rate);
	}

	lwkt_serialize_exit(msix->msix_serialize);

	return 0;
}

static int
igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct igb_tx_ring *txr = &sc->tx_rings[0];
	int error, nsegs;

	nsegs = txr->intr_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (nsegs <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc ||
	    nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) {
		error = EINVAL;
	} else {
		int i;

		error = 0;
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			sc->tx_rings[i].intr_nsegs = nsegs;
	}

	ifnet_deserialize_all(ifp);

	return error;
}

static int
igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->rx_rings[0].wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		sc->rx_rings[i].wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

static int
igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->tx_rings[0].wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_rings[i].wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

#ifdef IFPOLL_ENABLE

static int
igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->rx_npoll_off;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->rx_npoll_off = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

static int
igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->tx_npoll_off;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->tx_npoll_off = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif	/* IFPOLL_ENABLE */

static void
igb_init_intr(struct igb_softc *sc)
{
	igb_set_intr_mask(sc);

	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0)
		igb_init_unshared_intr(sc);

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		igb_set_eitr(sc, 0, sc->intr_rate);
	} else {
		int i;

		for (i = 0; i < sc->msix_cnt; ++i)
			igb_set_eitr(sc, i, sc->msix_data[i].msix_rate);
	}
}

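/*
 * A worked example of the EITR encoding used by igb_set_eitr() above
 * (sketch only; the numbers follow directly from the code), for a
 * requested rate of 8000 interrupts/sec:
 *
 * - 82575: eitr = 1000000000 / 256 / 8000 = 488, i.e. the interval is
 *   expressed in 256ns units; the value is also mirrored into the
 *   high 16 bits of the register.
 * - 82576 and later: eitr = 1000000 / 8000 = 125, i.e. the interval is
 *   expressed in microseconds and shifted into place with
 *   IGB_EITR_INTVL_SHIFT; E1000_EITR_CNT_IGNR is set as well.
 */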
static void 3787 igb_init_unshared_intr(struct igb_softc *sc) 3788 { 3789 struct e1000_hw *hw = &sc->hw; 3790 const struct igb_rx_ring *rxr; 3791 const struct igb_tx_ring *txr; 3792 uint32_t ivar, index; 3793 int i; 3794 3795 /* 3796 * Enable extended mode 3797 */ 3798 if (sc->hw.mac.type != e1000_82575) { 3799 uint32_t gpie; 3800 int ivar_max; 3801 3802 gpie = E1000_GPIE_NSICR; 3803 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3804 gpie |= E1000_GPIE_MSIX_MODE | 3805 E1000_GPIE_EIAME | 3806 E1000_GPIE_PBA; 3807 } 3808 E1000_WRITE_REG(hw, E1000_GPIE, gpie); 3809 3810 /* 3811 * Clear IVARs 3812 */ 3813 switch (sc->hw.mac.type) { 3814 case e1000_82576: 3815 ivar_max = IGB_MAX_IVAR_82576; 3816 break; 3817 3818 case e1000_82580: 3819 ivar_max = IGB_MAX_IVAR_82580; 3820 break; 3821 3822 case e1000_i350: 3823 ivar_max = IGB_MAX_IVAR_I350; 3824 break; 3825 3826 case e1000_vfadapt: 3827 case e1000_vfadapt_i350: 3828 ivar_max = IGB_MAX_IVAR_VF; 3829 break; 3830 3831 case e1000_i210: 3832 ivar_max = IGB_MAX_IVAR_I210; 3833 break; 3834 3835 case e1000_i211: 3836 ivar_max = IGB_MAX_IVAR_I211; 3837 break; 3838 3839 default: 3840 panic("unknown mac type %d\n", sc->hw.mac.type); 3841 } 3842 for (i = 0; i < ivar_max; ++i) 3843 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 3844 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3845 } else { 3846 uint32_t tmp; 3847 3848 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 3849 ("82575 w/ MSI-X")); 3850 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 3851 tmp |= E1000_CTRL_EXT_IRCA; 3852 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 3853 } 3854 3855 /* 3856 * Map TX/RX interrupts to EICR 3857 */ 3858 switch (sc->hw.mac.type) { 3859 case e1000_82580: 3860 case e1000_i350: 3861 case e1000_vfadapt: 3862 case e1000_vfadapt_i350: 3863 case e1000_i210: 3864 case e1000_i211: 3865 /* RX entries */ 3866 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3867 rxr = &sc->rx_rings[i]; 3868 3869 index = i >> 1; 3870 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3871 3872 if (i & 1) { 3873 ivar &= 0xff00ffff; 3874 ivar |= 3875 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16; 3876 } else { 3877 ivar &= 0xffffff00; 3878 ivar |= 3879 (rxr->rx_intr_bit | E1000_IVAR_VALID); 3880 } 3881 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3882 } 3883 /* TX entries */ 3884 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3885 txr = &sc->tx_rings[i]; 3886 3887 index = i >> 1; 3888 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3889 3890 if (i & 1) { 3891 ivar &= 0x00ffffff; 3892 ivar |= 3893 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24; 3894 } else { 3895 ivar &= 0xffff00ff; 3896 ivar |= 3897 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8; 3898 } 3899 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3900 } 3901 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3902 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8; 3903 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3904 } 3905 break; 3906 3907 case e1000_82576: 3908 /* RX entries */ 3909 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3910 rxr = &sc->rx_rings[i]; 3911 3912 index = i & 0x7; /* Each IVAR has two entries */ 3913 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3914 3915 if (i < 8) { 3916 ivar &= 0xffffff00; 3917 ivar |= 3918 (rxr->rx_intr_bit | E1000_IVAR_VALID); 3919 } else { 3920 ivar &= 0xff00ffff; 3921 ivar |= 3922 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16; 3923 } 3924 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3925 } 3926 /* TX entries */ 3927 for (i = 0; i < sc->tx_ring_inuse; ++i) { 3928 txr = &sc->tx_rings[i]; 3929 3930 index = i & 0x7; /* 
Each IVAR has two entries */ 3931 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3932 3933 if (i < 8) { 3934 ivar &= 0xffff00ff; 3935 ivar |= 3936 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8; 3937 } else { 3938 ivar &= 0x00ffffff; 3939 ivar |= 3940 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24; 3941 } 3942 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3943 } 3944 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3945 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8; 3946 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3947 } 3948 break; 3949 3950 case e1000_82575: 3951 /* 3952 * Enable necessary interrupt bits. 3953 * 3954 * The name of the register is confusing; in addition to 3955 * configuring the first vector of MSI-X, it also configures 3956 * which bits of EICR could be set by the hardware even when 3957 * MSI or line interrupt is used; it thus controls interrupt 3958 * generation. It MUST be configured explicitly; the default 3959 * value mentioned in the datasheet is wrong: RX queue0 and 3960 * TX queue0 are NOT enabled by default. 3961 */ 3962 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask); 3963 break; 3964 3965 default: 3966 panic("unknown mac type %d\n", sc->hw.mac.type); 3967 } 3968 } 3969 3970 static int 3971 igb_setup_intr(struct igb_softc *sc) 3972 { 3973 int error; 3974 3975 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 3976 return igb_msix_setup(sc); 3977 3978 error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE, 3979 (sc->flags & IGB_FLAG_SHARED_INTR) ? igb_intr_shared : igb_intr, 3980 sc, &sc->intr_tag, &sc->main_serialize); 3981 if (error) { 3982 device_printf(sc->dev, "Failed to register interrupt handler"); 3983 return error; 3984 } 3985 return 0; 3986 } 3987 3988 static void 3989 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_bit0, int intr_bitmax) 3990 { 3991 if (txr->sc->hw.mac.type == e1000_82575) { 3992 txr->tx_intr_bit = 0; /* unused */ 3993 switch (txr->me) { 3994 case 0: 3995 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0; 3996 break; 3997 case 1: 3998 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1; 3999 break; 4000 case 2: 4001 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2; 4002 break; 4003 case 3: 4004 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3; 4005 break; 4006 default: 4007 panic("unsupported # of TX ring, %d\n", txr->me); 4008 } 4009 } else { 4010 int intr_bit = *intr_bit0; 4011 4012 txr->tx_intr_bit = intr_bit % intr_bitmax; 4013 txr->tx_intr_mask = 1 << txr->tx_intr_bit; 4014 4015 *intr_bit0 = intr_bit + 1; 4016 } 4017 } 4018 4019 static void 4020 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_bit0, int intr_bitmax) 4021 { 4022 if (rxr->sc->hw.mac.type == e1000_82575) { 4023 rxr->rx_intr_bit = 0; /* unused */ 4024 switch (rxr->me) { 4025 case 0: 4026 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0; 4027 break; 4028 case 1: 4029 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1; 4030 break; 4031 case 2: 4032 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2; 4033 break; 4034 case 3: 4035 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3; 4036 break; 4037 default: 4038 panic("unsupported # of RX ring, %d\n", rxr->me); 4039 } 4040 } else { 4041 int intr_bit = *intr_bit0; 4042 4043 rxr->rx_intr_bit = intr_bit % intr_bitmax; 4044 rxr->rx_intr_mask = 1 << rxr->rx_intr_bit; 4045 4046 *intr_bit0 = intr_bit + 1; 4047 } 4048 } 4049 4050 static void 4051 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 4052 { 4053 struct igb_softc *sc = ifp->if_softc; 4054 4055 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz); 4056 } 4057 4058 static void 4059 
static void
igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
}

static void
igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
}

static int
igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
	    slz);
}

#ifdef INVARIANTS

static void
igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
	    slz, serialized);
}

#endif	/* INVARIANTS */

static void
igb_set_intr_mask(struct igb_softc *sc)
{
	int i;

	sc->intr_mask = sc->sts_intr_mask;
	for (i = 0; i < sc->rx_ring_inuse; ++i)
		sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		sc->intr_mask |= sc->tx_rings[i].tx_intr_mask;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n",
		    sc->intr_mask);
	}
}

static int
igb_alloc_intr(struct igb_softc *sc)
{
	int i, intr_bit, intr_bitmax;
	u_int intr_flags;

	igb_msix_try_alloc(sc);
	if (sc->intr_type == PCI_INTR_TYPE_MSIX)
		goto done;

	/*
	 * Allocate MSI/legacy interrupt resource
	 */
	sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= IGB_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(sc->dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(sc->dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &sc->intr_rid, intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(sc->dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return ENXIO;
	}

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_rings[i].tx_intr_cpuid = rman_get_cpuid(sc->intr_res);

	/*
	 * Setup MSI/legacy interrupt mask
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		intr_bitmax = IGB_MAX_TXRXINT_82575;
		break;

	case e1000_82576:
		intr_bitmax = IGB_MAX_TXRXINT_82576;
		break;

	case e1000_82580:
		intr_bitmax = IGB_MAX_TXRXINT_82580;
		break;

	case e1000_i350:
		intr_bitmax = IGB_MAX_TXRXINT_I350;
		break;

	case e1000_i210:
		intr_bitmax = IGB_MAX_TXRXINT_I210;
		break;

	case e1000_i211:
		intr_bitmax = IGB_MAX_TXRXINT_I211;
		break;

	default:
		intr_bitmax = IGB_MIN_TXRXINT;
		break;
	}
	intr_bit = 0;
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_set_txintr_mask(&sc->tx_rings[i], &intr_bit, intr_bitmax);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_set_rxintr_mask(&sc->rx_rings[i], &intr_bit, intr_bitmax);
	sc->sts_intr_bit = 0;
	sc->sts_intr_mask = E1000_EICR_OTHER;

	/* Initialize interrupt rate */
	sc->intr_rate = IGB_INTR_RATE;
done:
	igb_set_ring_inuse(sc, FALSE);
	igb_set_intr_mask(sc);
	return 0;
}
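/*
 * NOTE: igb_alloc_intr() tries MSI-X first; only when
 * igb_msix_try_alloc() leaves intr_type untouched does it fall back
 * to pci_alloc_1intr(), which in turn prefers MSI over a shared line
 * interrupt.  The "msix.enable" (see igb_msix_try_alloc() below) and
 * "irq.unshared" device tunables steer these choices.
 */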
static void
igb_free_intr(struct igb_softc *sc)
{
	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		if (sc->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    sc->intr_rid, sc->intr_res);
		}
		if (sc->intr_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->dev);
	} else {
		igb_msix_free(sc, TRUE);
	}
}

static void
igb_teardown_intr(struct igb_softc *sc)
{
	if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_tag);
	else
		igb_msix_teardown(sc, sc->msix_cnt);
}

static void
igb_msix_try_alloc(struct igb_softc *sc)
{
	int msix_enable, msix_cnt, msix_cnt2, alloc_cnt;
	int i, x, error;
	int offset, offset_def;
	struct igb_msix_data *msix;
	boolean_t aggregate, setup = FALSE;

	/*
	 * Don't enable MSI-X on 82575, see:
	 * 82575 specification update errata #25
	 */
	if (sc->hw.mac.type == e1000_82575)
		return;

	/* Don't enable MSI-X on VF */
	if (sc->vf_ifp)
		return;

	msix_enable = device_getenv_int(sc->dev, "msix.enable",
	    igb_msix_enable);
	if (!msix_enable)
		return;

	msix_cnt = pci_msix_count(sc->dev);
#ifdef IGB_MSIX_DEBUG
	msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt);
#endif
	if (msix_cnt <= 1) {
		/* One MSI-X model does not make sense */
		return;
	}

	/* Round msix_cnt down to the largest power of 2 */
	i = 0;
	while ((1 << (i + 1)) <= msix_cnt)
		++i;
	msix_cnt2 = 1 << i;

	if (bootverbose) {
		device_printf(sc->dev, "MSI-X count %d/%d\n",
		    msix_cnt2, msix_cnt);
	}

	KKASSERT(msix_cnt2 <= msix_cnt);
	if (msix_cnt == msix_cnt2) {
		/* We need at least one MSI-X for link status */
		msix_cnt2 >>= 1;
		if (msix_cnt2 <= 1) {
			/* One MSI-X for RX/TX does not make sense */
			device_printf(sc->dev, "not enough MSI-X for TX/RX, "
			    "MSI-X count %d/%d\n", msix_cnt2, msix_cnt);
			return;
		}
		KKASSERT(msix_cnt > msix_cnt2);

		if (bootverbose) {
			device_printf(sc->dev, "MSI-X count fixup %d/%d\n",
			    msix_cnt2, msix_cnt);
		}
	}

	sc->rx_ring_msix = sc->rx_ring_cnt;
	if (sc->rx_ring_msix > msix_cnt2)
		sc->rx_ring_msix = msix_cnt2;

	sc->tx_ring_msix = sc->tx_ring_cnt;
	if (sc->tx_ring_msix > msix_cnt2)
		sc->tx_ring_msix = msix_cnt2;

	if (msix_cnt >= sc->tx_ring_msix + sc->rx_ring_msix + 1) {
		/*
		 * Independent TX/RX MSI-X
		 */
		aggregate = FALSE;
		if (bootverbose)
			device_printf(sc->dev, "independent TX/RX MSI-X\n");
		alloc_cnt = sc->tx_ring_msix + sc->rx_ring_msix;
	} else {
		/*
		 * Aggregate TX/RX MSI-X
		 */
		aggregate = TRUE;
		if (bootverbose)
			device_printf(sc->dev, "aggregate TX/RX MSI-X\n");
		alloc_cnt = msix_cnt2;
		if (alloc_cnt > ncpus2)
			alloc_cnt = ncpus2;
		if (sc->rx_ring_msix > alloc_cnt)
			sc->rx_ring_msix = alloc_cnt;
		if (sc->tx_ring_msix > alloc_cnt)
			sc->tx_ring_msix = alloc_cnt;
	}
	++alloc_cnt;	/* For link status */

	if (bootverbose) {
		device_printf(sc->dev, "MSI-X alloc %d, "
		    "RX ring %d, TX ring %d\n", alloc_cnt,
		    sc->rx_ring_msix, sc->tx_ring_msix);
	}

	sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR);
	sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_mem_rid, RF_ACTIVE);
	if (sc->msix_mem_res == NULL) {
		device_printf(sc->dev, "Unable to map MSI-X table\n");
		return;
	}
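	/*
	 * Worked example of the vector budgeting above: if
	 * pci_msix_count() reports 8 vectors, msix_cnt2 is first rounded
	 * down to the largest power of 2 (still 8) and then halved to 4
	 * so that a vector remains for link status.  With 4 RX and 4 TX
	 * rings, independent TX/RX would need 4 + 4 + 1 = 9 > 8 vectors,
	 * so the aggregate path clamps both ring counts to
	 * min(msix_cnt2, ncpus2) and alloc_cnt becomes that many shared
	 * RX/TX vectors plus 1 for link status.
	 */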
	sc->msix_cnt = alloc_cnt;
	sc->msix_data = kmalloc_cachealign(
	    sizeof(struct igb_msix_data) * sc->msix_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (x = 0; x < sc->msix_cnt; ++x) {
		msix = &sc->msix_data[x];

		lwkt_serialize_init(&msix->msix_serialize0);
		msix->msix_sc = sc;
		msix->msix_rid = -1;
		msix->msix_vector = x;
		msix->msix_mask = 1 << msix->msix_vector;
		msix->msix_rate = IGB_INTR_RATE;
	}

	x = 0;
	if (!aggregate) {
		/*
		 * RX rings
		 */
		if (sc->rx_ring_msix == ncpus2) {
			offset = 0;
		} else {
			offset_def = (sc->rx_ring_msix *
			    device_get_unit(sc->dev)) % ncpus2;

			offset = device_getenv_int(sc->dev,
			    "msix.rxoff", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->rx_ring_msix != 0) {
				device_printf(sc->dev,
				    "invalid msix.rxoff %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}
		igb_msix_rx_conf(sc, 0, &x, offset);

		/*
		 * TX rings
		 */
		if (sc->tx_ring_msix == ncpus2) {
			offset = 0;
		} else {
			offset_def = (sc->tx_ring_msix *
			    device_get_unit(sc->dev)) % ncpus2;

			offset = device_getenv_int(sc->dev,
			    "msix.txoff", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->tx_ring_msix != 0) {
				device_printf(sc->dev,
				    "invalid msix.txoff %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}
		igb_msix_tx_conf(sc, 0, &x, offset);
	} else {
		int ring_agg, ring_max;

		ring_agg = sc->rx_ring_msix;
		if (ring_agg > sc->tx_ring_msix)
			ring_agg = sc->tx_ring_msix;

		ring_max = sc->rx_ring_msix;
		if (ring_max < sc->tx_ring_msix)
			ring_max = sc->tx_ring_msix;

		if (ring_max == ncpus2) {
			offset = 0;
		} else {
			offset_def = (ring_max * device_get_unit(sc->dev)) %
			    ncpus2;

			offset = device_getenv_int(sc->dev, "msix.off",
			    offset_def);
			if (offset >= ncpus2 || offset % ring_max != 0) {
				device_printf(sc->dev,
				    "invalid msix.off %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}

		for (i = 0; i < ring_agg; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			KKASSERT(x < sc->msix_cnt);
			msix = &sc->msix_data[x++];

			txr->tx_intr_bit = msix->msix_vector;
			txr->tx_intr_mask = msix->msix_mask;
			rxr->rx_intr_bit = msix->msix_vector;
			rxr->rx_intr_mask = msix->msix_mask;

			msix->msix_serialize = &msix->msix_serialize0;
			msix->msix_func = igb_msix_rxtx;
			msix->msix_arg = msix;
			msix->msix_rx = rxr;
			msix->msix_tx = txr;

			msix->msix_cpuid = i + offset;
			KKASSERT(msix->msix_cpuid < ncpus2);
			txr->tx_intr_cpuid = msix->msix_cpuid;

			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
			    "%s rxtx%d", device_get_nameunit(sc->dev), i);
			msix->msix_rate = IGB_MSIX_RX_RATE;
			ksnprintf(msix->msix_rate_desc,
			    sizeof(msix->msix_rate_desc),
			    "RXTX%d interrupt rate", i);
		}

		if (ring_agg != ring_max) {
			if (ring_max == sc->tx_ring_msix)
				igb_msix_tx_conf(sc, i, &x, offset);
			else
				igb_msix_rx_conf(sc, i, &x, offset);
		}
	}
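	/*
	 * Example of the CPU offset logic above: on a system with
	 * ncpus2 == 4, unit igb1 using 2 aggregate RX/TX vectors gets
	 * offset_def = (2 * 1) % 4 = 2, so vector 0 is bound to cpu2
	 * and vector 1 to cpu3, while igb0 would start at cpu0.  The
	 * msix.rxoff/msix.txoff/msix.off tunables may override the
	 * default, but the offset must be a multiple of the ring count
	 * and keep every vector below ncpus2.
	 */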
	/*
	 * Link status
	 */
	KKASSERT(x < sc->msix_cnt);
	msix = &sc->msix_data[x++];
	sc->sts_intr_bit = msix->msix_vector;
	sc->sts_intr_mask = msix->msix_mask;

	msix->msix_serialize = &sc->main_serialize;
	msix->msix_func = igb_msix_status;
	msix->msix_arg = sc;
	msix->msix_cpuid = 0;
	ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s sts",
	    device_get_nameunit(sc->dev));
	ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
	    "status interrupt rate");

	KKASSERT(x == sc->msix_cnt);

	error = pci_setup_msix(sc->dev);
	if (error) {
		device_printf(sc->dev, "Setup MSI-X failed\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->msix_cnt; ++i) {
		msix = &sc->msix_data[i];

		error = pci_alloc_msix_vector(sc->dev, msix->msix_vector,
		    &msix->msix_rid, msix->msix_cpuid);
		if (error) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d on cpu%d\n",
			    msix->msix_vector, msix->msix_cpuid);
			goto back;
		}

		msix->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &msix->msix_rid, RF_ACTIVE);
		if (msix->msix_res == NULL) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d resource\n",
			    msix->msix_vector);
			error = ENOMEM;
			goto back;
		}
	}

	pci_enable_msix(sc->dev);
	sc->intr_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		igb_msix_free(sc, setup);
}

static void
igb_msix_free(struct igb_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->msix_cnt > 1);

	for (i = 0; i < sc->msix_cnt; ++i) {
		struct igb_msix_data *msix = &sc->msix_data[i];

		if (msix->msix_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    msix->msix_rid, msix->msix_res);
		}
		if (msix->msix_rid >= 0)
			pci_release_msix_vector(sc->dev, msix->msix_rid);
	}
	if (setup)
		pci_teardown_msix(sc->dev);

	sc->msix_cnt = 0;
	kfree(sc->msix_data, M_DEVBUF);
	sc->msix_data = NULL;
}

static int
igb_msix_setup(struct igb_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_cnt; ++i) {
		struct igb_msix_data *msix = &sc->msix_data[i];
		int error;

		error = bus_setup_intr_descr(sc->dev, msix->msix_res,
		    INTR_MPSAFE, msix->msix_func, msix->msix_arg,
		    &msix->msix_handle, msix->msix_serialize,
		    msix->msix_desc);
		if (error) {
			device_printf(sc->dev, "could not set up %s "
			    "interrupt handler.\n", msix->msix_desc);
			igb_msix_teardown(sc, i);
			return error;
		}
	}
	return 0;
}

static void
igb_msix_teardown(struct igb_softc *sc, int msix_cnt)
{
	int i;

	for (i = 0; i < msix_cnt; ++i) {
		struct igb_msix_data *msix = &sc->msix_data[i];

		bus_teardown_intr(sc->dev, msix->msix_res, msix->msix_handle);
	}
}

static void
igb_msix_rx(void *arg)
{
	struct igb_rx_ring *rxr = arg;

	ASSERT_SERIALIZED(&rxr->rx_serialize);
	igb_rxeof(rxr, -1);

	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_msix_tx(void *arg)
{
	struct igb_tx_ring *txr = arg;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	igb_txeof(txr);
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);

	E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
}
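/*
 * NOTE: since igb_init_unshared_intr() sets E1000_GPIE_EIAME and
 * E1000_GPIE_PBA in MSI-X mode, the hardware can auto-mask a vector's
 * EIMS bit when that vector fires (gated by the EIAM register, which
 * is programmed elsewhere in the driver).  Each MSI-X handler
 * therefore re-arms only its own vector by writing its mask back to
 * EIMS on the way out, as igb_msix_rx()/igb_msix_tx() above and
 * igb_msix_status() below do.
 */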
static void
igb_msix_status(void *arg)
{
	struct igb_softc *sc = arg;
	uint32_t icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
}

static void
igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
{
	sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling);
	sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling);
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if,
		    "RX rings %d/%d, TX rings %d/%d\n",
		    sc->rx_ring_inuse, sc->rx_ring_cnt,
		    sc->tx_ring_inuse, sc->tx_ring_cnt);
	}
}

static int
igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWRSS(sc))
		return 1;

	if (polling)
		return sc->rx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING_RSS;
	else
		return sc->rx_ring_msix;
}

static int
igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWTSS(sc))
		return 1;

	if (polling)
		return sc->tx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING;
	else
		return sc->tx_ring_msix;
}

static int
igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) {
		struct ip *ip;

		ip = mtodoff(m, struct ip *, hoff);
		ip->ip_len = 0;
	}

	return 0;
}
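/*
 * Example: for a common TSO frame the pullup above guarantees the
 * 14-byte Ethernet header, 20-byte IPv4 header and 20-byte TCP
 * header -- 54 bytes in all -- are contiguous in the first mbuf, so
 * the IGB_TXFLAG_TSO_IPLEN0 fixup can zero ip_len in place and
 * igb_tso_ctx() below can hand exact header lengths to the hardware
 * in a single context descriptor.
 */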
4719 */ 4720 if (txr->sc->hw.mac.type == e1000_82575) 4721 mss_l4len_idx |= txr->me << 4; 4722 4723 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 4724 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 4725 TXD->seqnum_seed = htole32(0); 4726 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 4727 4728 /* We've consumed the first desc, adjust counters */ 4729 if (++ctxd == txr->num_tx_desc) 4730 ctxd = 0; 4731 txr->next_avail_desc = ctxd; 4732 --txr->tx_avail; 4733 4734 *hlen = hoff + iphlen + thoff; 4735 } 4736 4737 static void 4738 igb_setup_serializer(struct igb_softc *sc) 4739 { 4740 const struct igb_msix_data *msix; 4741 int i, j; 4742 4743 /* 4744 * Allocate serializer array 4745 */ 4746 4747 /* Main + TX + RX */ 4748 sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt; 4749 4750 /* Aggregate TX/RX MSI-X */ 4751 for (i = 0; i < sc->msix_cnt; ++i) { 4752 msix = &sc->msix_data[i]; 4753 if (msix->msix_serialize == &msix->msix_serialize0) 4754 sc->serialize_cnt++; 4755 } 4756 4757 sc->serializes = 4758 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *), 4759 M_DEVBUF, M_WAITOK | M_ZERO); 4760 4761 /* 4762 * Setup serializers 4763 * 4764 * NOTE: Order is critical 4765 */ 4766 4767 i = 0; 4768 4769 KKASSERT(i < sc->serialize_cnt); 4770 sc->serializes[i++] = &sc->main_serialize; 4771 4772 for (j = 0; j < sc->msix_cnt; ++j) { 4773 msix = &sc->msix_data[j]; 4774 if (msix->msix_serialize == &msix->msix_serialize0) { 4775 KKASSERT(i < sc->serialize_cnt); 4776 sc->serializes[i++] = msix->msix_serialize; 4777 } 4778 } 4779 4780 for (j = 0; j < sc->tx_ring_cnt; ++j) { 4781 KKASSERT(i < sc->serialize_cnt); 4782 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize; 4783 } 4784 4785 for (j = 0; j < sc->rx_ring_cnt; ++j) { 4786 KKASSERT(i < sc->serialize_cnt); 4787 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize; 4788 } 4789 4790 KKASSERT(i == sc->serialize_cnt); 4791 } 4792 4793 static void 4794 igb_msix_rx_conf(struct igb_softc *sc, int i, int *x0, int offset) 4795 { 4796 int x = *x0; 4797 4798 for (; i < sc->rx_ring_msix; ++i) { 4799 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4800 struct igb_msix_data *msix; 4801 4802 KKASSERT(x < sc->msix_cnt); 4803 msix = &sc->msix_data[x++]; 4804 4805 rxr->rx_intr_bit = msix->msix_vector; 4806 rxr->rx_intr_mask = msix->msix_mask; 4807 4808 msix->msix_serialize = &rxr->rx_serialize; 4809 msix->msix_func = igb_msix_rx; 4810 msix->msix_arg = rxr; 4811 4812 msix->msix_cpuid = i + offset; 4813 KKASSERT(msix->msix_cpuid < ncpus2); 4814 4815 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s rx%d", 4816 device_get_nameunit(sc->dev), i); 4817 4818 msix->msix_rate = IGB_MSIX_RX_RATE; 4819 ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc), 4820 "RX%d interrupt rate", i); 4821 } 4822 *x0 = x; 4823 } 4824 4825 static void 4826 igb_msix_tx_conf(struct igb_softc *sc, int i, int *x0, int offset) 4827 { 4828 int x = *x0; 4829 4830 for (; i < sc->tx_ring_msix; ++i) { 4831 struct igb_tx_ring *txr = &sc->tx_rings[i]; 4832 struct igb_msix_data *msix; 4833 4834 KKASSERT(x < sc->msix_cnt); 4835 msix = &sc->msix_data[x++]; 4836 4837 txr->tx_intr_bit = msix->msix_vector; 4838 txr->tx_intr_mask = msix->msix_mask; 4839 4840 msix->msix_serialize = &txr->tx_serialize; 4841 msix->msix_func = igb_msix_tx; 4842 msix->msix_arg = txr; 4843 4844 msix->msix_cpuid = i + offset; 4845 KKASSERT(msix->msix_cpuid < ncpus2); 4846 txr->tx_intr_cpuid = msix->msix_cpuid; 4847 4848 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s tx%d", 4849 device_get_nameunit(sc->dev), 
static void
igb_msix_rx_conf(struct igb_softc *sc, int i, int *x0, int offset)
{
	int x = *x0;

	for (; i < sc->rx_ring_msix; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		struct igb_msix_data *msix;

		KKASSERT(x < sc->msix_cnt);
		msix = &sc->msix_data[x++];

		rxr->rx_intr_bit = msix->msix_vector;
		rxr->rx_intr_mask = msix->msix_mask;

		msix->msix_serialize = &rxr->rx_serialize;
		msix->msix_func = igb_msix_rx;
		msix->msix_arg = rxr;

		msix->msix_cpuid = i + offset;
		KKASSERT(msix->msix_cpuid < ncpus2);

		ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s rx%d",
		    device_get_nameunit(sc->dev), i);

		msix->msix_rate = IGB_MSIX_RX_RATE;
		ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
		    "RX%d interrupt rate", i);
	}
	*x0 = x;
}

static void
igb_msix_tx_conf(struct igb_softc *sc, int i, int *x0, int offset)
{
	int x = *x0;

	for (; i < sc->tx_ring_msix; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		struct igb_msix_data *msix;

		KKASSERT(x < sc->msix_cnt);
		msix = &sc->msix_data[x++];

		txr->tx_intr_bit = msix->msix_vector;
		txr->tx_intr_mask = msix->msix_mask;

		msix->msix_serialize = &txr->tx_serialize;
		msix->msix_func = igb_msix_tx;
		msix->msix_arg = txr;

		msix->msix_cpuid = i + offset;
		KKASSERT(msix->msix_cpuid < ncpus2);
		txr->tx_intr_cpuid = msix->msix_cpuid;

		ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s tx%d",
		    device_get_nameunit(sc->dev), i);

		msix->msix_rate = IGB_MSIX_TX_RATE;
		ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
		    "TX%d interrupt rate", i);
	}
	*x0 = x;
}

static void
igb_msix_rxtx(void *arg)
{
	struct igb_msix_data *msix = arg;
	struct igb_rx_ring *rxr = msix->msix_rx;
	struct igb_tx_ring *txr = msix->msix_tx;

	ASSERT_SERIALIZED(&msix->msix_serialize0);

	lwkt_serialize_enter(&rxr->rx_serialize);
	igb_rxeof(rxr, -1);
	lwkt_serialize_exit(&rxr->rx_serialize);

	lwkt_serialize_enter(&txr->tx_serialize);
	igb_txeof(txr);
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);
	lwkt_serialize_exit(&txr->tx_serialize);

	E1000_WRITE_REG(&msix->msix_sc->hw, E1000_EIMS, msix->msix_mask);
}

static void
igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling)
{
	if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
		sc->timer_cpuid = 0;	/* XXX fixed */
	else
		sc->timer_cpuid = rman_get_cpuid(sc->intr_res);
}