/*
 * Copyright (c) 2001-2011, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/igb/if_igb.h>

#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id)	\
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }

static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),
	IGB_DEVICE(I350_VF),

	/* required last entry */
	IGB_DEVICE_NULL
};

static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static int	igb_txctx_pullup(struct igb_tx_ring *, struct mbuf **);
static boolean_t igb_txctx(struct igb_tx_ring *, struct mbuf *);
static void	igb_add_sysctl(struct igb_softc *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_dma_alloc(struct igb_softc *);
static void	igb_dma_free(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifnet *);
static void	igb_start(struct ifnet *);
#ifdef DEVICE_POLLING
static void	igb_poll(struct ifnet *, enum poll_cmd, int);
#endif

static void	igb_intr(void *);
static void	igb_shared_intr(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *);
static void	igb_set_eitr(struct igb_softc *);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_setup_tx_intr(struct igb_tx_ring *);
static void	igb_setup_rx_intr(struct igb_rx_ring *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);

static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	{ 0, 0 }
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */
static int	igb_fc_setting = e1000_fc_full;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);

/* i350 specific */
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);

static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* If the Ignore Checksum (IXSM) bit is set, report nothing */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	u_int intr_flags;
	int error = 0;

#ifdef notyet
	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
	    &igb_enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
	    adapter, 0, igb_set_flowcntl, "I", "Flow Control");
#endif

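	/*
	 * NOTE: the callout initialized below is armed by igb_init() and
	 * drives igb_timer(), which refreshes link state and statistics
	 * counters once per second.
	 */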
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, igb_msi_enable,
	    &sc->intr_rid, &intr_flags);

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		error = ENXIO;
		goto failed;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	sc->num_queues = 1; /* Defaults for Legacy or MSI */
	sc->intr_rate = IGB_INTR_RATE;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
	sc->min_frame_size = ETHER_MIN_LEN;

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = igb_dma_alloc(sc);
	if (error)
		goto failed;

	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

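	/*
	 * NOTE: sc->stats points at either an e1000_vf_stats or an
	 * e1000_hw_stats structure depending on sc->vf_ifp, so consumers
	 * must check sc->vf_ifp before interpreting it.
	 */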
	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		e1000_set_eee_i350(&sc->hw);
	}

	/*
	 * Start from a known state, this is important in reading the nvm and
	 * mac from that.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

#ifdef notyet
	/*
	** Configure Interrupts
	*/
	if ((adapter->msix > 1) && (igb_enable_msix))
		error = igb_allocate_msix(adapter);
	else /* MSI or Legacy */
		error = igb_allocate_legacy(adapter);
	if (error)
		goto err_late;
#endif

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree, must be after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	sc->has_manage = e1000_enable_mng_pass_thru(&sc->hw);

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

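		/*
		 * Hold all of the ifnet's serializers so neither the
		 * interrupt handler nor the periodic timer can run
		 * while the hardware is being torn down.
		 */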
		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}
	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_dma_free(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);

	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	if_devstart(ifp);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef DEVICE_POLLING
			if (!(ifp->if_flags & IFF_POLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/*
		 * As the speed/duplex settings are being
		 * changed, we need to reset the PHY.
		 */
		sc->hw.phy.reset_disable = FALSE;

		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = IGB_CSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->num_queues; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#else
	sc->rx_mbuf_sz = MCLBYTES;
#endif

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->num_queues; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->timer, hz, igb_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

#if 0
	if (adapter->msix > 1) /* Set up queue routing */
		igb_configure_queues(adapter);
#endif

	/* this clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);
#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		igb_disable_intr(sc);
	else
#endif /* DEVICE_POLLING */
	{
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	e1000_set_eee_i350(&sc->hw);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy.reset_disable = TRUE;
}

static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;

		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	igb_init(sc);

	return 0;
}

static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
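
/*
 * igb_set_multi() programs the hardware multicast filter from the
 * interface's multicast address list.  If the list overflows the
 * MAX_NUM_MULTICAST_ADDRESSES filter slots, the chip is instead put
 * into "receive all multicast" mode (RCTL.MPE).
 */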

static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset(&sc->timer, hz, igb_timer, sc);

	ifnet_deserialize_all(ifp);
}

static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* Fall thru */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			if_printf(ifp, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex");
		}
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->num_queues; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->num_queues; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}

static void
igb_reset(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;
		/* XXX pba = E1000_PBA_35K; */

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water
	 *   mark.  This allows the receiver to restart by sending XON when
	 *   it has drained a bit.
	 */
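	/*
	 * Worked example with assumed figures: pba = 32 (KB) and a
	 * 1518 byte max frame give
	 *   hwm = min(32768 * 9 / 10, 32768 - 2 * 1518)
	 *       = min(29491, 29732) = 29491
	 * which the granularity masking below rounds down to 29488.
	 */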
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	if (hw->mac.type == e1000_i350 && sc->dma_coalesce) {
		uint32_t reg;

		hwm = (pba - 4) << 10;
		reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK;

		/* transition to L0x or L1 if available..*/
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

		/* timer = +-1000 usec in 32usec intervals */
		reg |= (1000 >> 5);
		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		/* No lower threshold */
		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		/* set hwm to PBA - 2 * max frame size */
		E1000_WRITE_REG(hw, E1000_FCRTC, hwm);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		reg |= 0x800000FF; /* 255 usec */
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * sc->max_frame_size)) >> 6);

		/* make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg | E1000_PCIEMISC_LX_DECISION);
		if_printf(ifp, "DMA Coalescing enabled\n");
	}

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = igb_poll;
#endif
	ifp->if_watchdog = igb_watchdog;

	ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, igb_media_change, igb_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

static void
igb_add_sysctl(struct igb_softc *sc)
{
	sysctl_ctx_init(&sc->sysctl_ctx);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		device_printf(sc->dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->num_rx_desc, 0, NULL);
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->num_tx_desc, 0, NULL);

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# segments per TX interrupt");
}

static int
igb_dma_alloc(struct igb_softc *sc)
{
	int error, i;

	/* First allocate the top level queue structs */
	sc->queues = kmalloc(sizeof(struct igb_queue) * sc->num_queues,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &sc->parent_tag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate TX descriptor rings and buffers
	 */
	sc->tx_rings = kmalloc(sizeof(struct igb_tx_ring) * sc->num_queues,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->num_queues; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		txr->sc = sc;
		txr->me = i;

		error = igb_create_tx_ring(txr);
		if (error)
			return error;
	}

	/*
	 * Allocate RX descriptor rings and buffers
	 */
	sc->rx_rings = kmalloc(sizeof(struct igb_rx_ring) * sc->num_queues,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->num_queues; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		rxr->sc = sc;
		rxr->me = i;

		error = igb_create_rx_ring(rxr);
		if (error)
			return error;
	}

	/*
	 * Finally set up the queue holding structs
	 */
	for (i = 0; i < sc->num_queues; i++) {
		struct igb_queue *que = &sc->queues[i];

		que->sc = sc;
		que->txr = &sc->tx_rings[i];
		que->rxr = &sc->rx_rings[i];
	}
	return 0;
}

static void
igb_dma_free(struct igb_softc *sc)
{
	int i;

	if (sc->queues != NULL)
		kfree(sc->queues, M_DEVBUF);

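	/*
	 * NOTE: igb_destroy_tx_ring()/igb_destroy_rx_ring() take the number
	 * of DMA maps actually created, so they are also safe to call on
	 * partially constructed rings; here all maps exist, so the full
	 * num_tx_desc/num_rx_desc counts are passed.
	 */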
	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->num_queues; ++i)
			igb_destroy_tx_ring(&sc->tx_rings[i], sc->num_tx_desc);
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->num_queues; ++i)
			igb_destroy_rx_ring(&sc->rx_rings[i], sc->num_rx_desc);
		kfree(sc->rx_rings, M_DEVBUF);
	}
}

static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i;

	/*
	 * Validate number of transmit descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 ||
	    (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, igb_txd);
		txr->sc->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->sc->num_tx_desc = igb_txd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->sc->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	txr->tx_buf = kmalloc(sizeof(struct igb_tx_buf) * txr->sc->num_tx_desc,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->sc->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	txr->spare_desc = IGB_TX_SPARE;
	txr->intr_nsegs = txr->sc->num_tx_desc / 16;
	txr->oact_hi_desc = txr->sc->num_tx_desc / 2;
	txr->oact_lo_desc = txr->sc->num_tx_desc / 8;
	if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX)
		txr->oact_lo_desc = IGB_TX_OACTIVE_MAX;
	if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED)
		txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED;

	return 0;
}

static void
igb_free_tx_ring(struct igb_tx_ring *txr)
{
	int i;

	for (i = 0; i < txr->sc->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
	}
}

static void
igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
{
	int i;

	if (txr->txdma.dma_vaddr != NULL) {
		bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
		bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
		    txr->txdma.dma_map);
		bus_dma_tag_destroy(txr->txdma.dma_tag);
		txr->txdma.dma_vaddr = NULL;
	}

	if (txr->tx_hdr != NULL) {
		bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
		bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
		    txr->tx_hdr_dmap);
		bus_dma_tag_destroy(txr->tx_hdr_dtag);
		txr->tx_hdr = NULL;
	}

	if (txr->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		KKASSERT(txbuf->m_head == NULL);
		bus_dmamap_destroy(txr->tx_tag, txbuf->map);
	}
	bus_dma_tag_destroy(txr->tx_tag);

	kfree(txr->tx_buf, M_DEVBUF);
	txr->tx_buf = NULL;
}

static void
igb_init_tx_ring(struct igb_tx_ring *txr)
{
	/* Clear the old descriptor contents */
	bzero(txr->tx_base,
	    sizeof(union e1000_adv_tx_desc) * txr->sc->num_tx_desc);

	/* Clear TX head write-back buffer */
	*(txr->tx_hdr) = 0;

	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;
	txr->tx_nsegs = 0;

	/* Set number of descriptors available */
	txr->tx_avail = txr->sc->num_tx_desc;
}

static void
igb_init_tx_unit(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < sc->num_queues; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		uint64_t bus_addr = txr->txdma.dma_paddr;
		uint64_t hdr_paddr = txr->tx_hdr_paddr;
		uint32_t txdctl = 0;
		uint32_t dca_txctrl;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    sc->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);

		dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
		dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);

		E1000_WRITE_REG(hw, E1000_TDWBAH(i),
		    (uint32_t)(hdr_paddr >> 32));
		E1000_WRITE_REG(hw, E1000_TDWBAL(i),
		    ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);
	}

	if (sc->vf_ifp)
		return;

	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}

static boolean_t
igb_txctx(struct igb_tx_ring *txr, struct mbuf *mp)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct igb_tx_buf *txbuf;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	int ehdrlen, ctxd, ip_hlen = 0;
	uint16_t etype, vlantag = 0;
	boolean_t offload = TRUE;

	if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0)
		offload = FALSE;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
	ctxd = txr->next_avail_desc;
	txbuf = &txr->tx_buf[ctxd];
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor, thus
	 * we need to be here just for that setup.
	 */
	if (mp->m_flags & M_VLANTAG) {
		vlantag = htole16(mp->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (!offload) {
		return FALSE;
	}

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= ETHER_HDR_LEN,
	    ("igb_txctx_pullup is not called (eh)?\n"));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN,
		    ("igb_txctx_pullup is not called (evh)?\n"));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		KASSERT(mp->m_len >= ehdrlen + IGB_IPVHL_SIZE,
		    ("igb_txctx_pullup is not called (eh+ip_vhl)?\n"));

		/* NOTE: We could only safely access ip.ip_vhl part */
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;

		if (mp->m_pkthdr.csum_flags & CSUM_IP)
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		break;

#ifdef notyet
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
#endif

	default:
		offload = FALSE;
		break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	if (mp->m_pkthdr.csum_flags & CSUM_TCP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;

	/* 82575 needs the queue index added */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	txbuf->m_head = NULL;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->sc->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return offload;
}
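
/*
 * igb_txeof() reclaims completed TX descriptors.  Instead of scanning
 * descriptor status (DD) bits, the driver reads the head index that the
 * chip writes back to txr->tx_hdr (TDWBAL/TDWBAH are programmed with
 * E1000_TX_HEAD_WB_ENABLE in igb_init_tx_unit()) and frees every buffer
 * from next_to_clean up to that head.
 */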

static void
igb_txeof(struct igb_tx_ring *txr)
{
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
	int first, hdr, avail;

	if (txr->tx_avail == txr->sc->num_tx_desc)
		return;

	first = txr->next_to_clean;
	hdr = *(txr->tx_hdr);

	if (first == hdr)
		return;

	avail = txr->tx_avail;
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		++avail;
		if (txbuf->m_head) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
			++ifp->if_opackets;
		}
		if (++first == txr->sc->num_tx_desc)
			first = 0;
	}
	txr->next_to_clean = first;
	txr->tx_avail = avail;

	/*
	 * If we have a minimum free, clear IFF_OACTIVE
	 * to tell the stack that it is OK to send packets.
	 */
	if (IGB_IS_NOT_OACTIVE(txr)) {
		ifp->if_flags &= ~IFF_OACTIVE;

		/*
		 * We have enough TX descriptors, turn off
		 * the watchdog.  We allow small amount of
		 * packets (roughly intr_nsegs) pending on
		 * the transmit ring.
		 */
		ifp->if_timer = 0;
	}
}

static int
igb_create_rx_ring(struct igb_rx_ring *rxr)
{
	int rsize, i, error;

	/*
	 * Validate number of receive descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 ||
	    (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, igb_rxd);
		rxr->sc->num_rx_desc = IGB_DEFAULT_RXD;
	} else {
		rxr->sc->num_rx_desc = igb_rxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->sc->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RxDescriptor memory\n");
		return ENOMEM;
	}
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rxr->rx_buf = kmalloc(sizeof(struct igb_rx_buf) * rxr->sc->num_rx_desc,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES,		/* maxsize */
	    1,			/* nsegments */
	    MCLBYTES,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA maps\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->sc->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA maps\n");
			igb_destroy_rx_ring(rxr, i);
			return error;
		}
	}
	return 0;
}

static void
igb_free_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->sc->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}

	if (rxr->fmp != NULL)
		m_freem(rxr->fmp);
	rxr->fmp = NULL;
	rxr->lmp = NULL;
}

static void
igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rxdma.dma_vaddr != NULL) {
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
		bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
		    rxr->rxdma.dma_map);
		bus_dma_tag_destroy(rxr->rxdma.dma_tag);
		rxr->rxdma.dma_vaddr = NULL;
	}

	if (rxr->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		KKASSERT(rxbuf->m_head == NULL);
		bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
	}
	bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
	bus_dma_tag_destroy(rxr->rx_tag);

	kfree(rxr->rx_buf, M_DEVBUF);
	rxr->rx_buf = NULL;
}

static void
igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf)
{
	rxd->read.pkt_addr = htole64(rxbuf->paddr);
	rxd->wb.upper.status_error = 0;
}

static int
igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct igb_rx_buf *rxbuf;
	int error, nseg;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 1992 if (m == NULL) { 1993 if (wait) { 1994 if_printf(&rxr->sc->arpcom.ac_if, 1995 "Unable to allocate RX mbuf\n"); 1996 } 1997 return ENOBUFS; 1998 } 1999 m->m_len = m->m_pkthdr.len = MCLBYTES; 2000 2001 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2002 m_adj(m, ETHER_ALIGN); 2003 2004 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2005 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2006 if (error) { 2007 m_freem(m); 2008 if (wait) { 2009 if_printf(&rxr->sc->arpcom.ac_if, 2010 "Unable to load RX mbuf\n"); 2011 } 2012 return error; 2013 } 2014 2015 rxbuf = &rxr->rx_buf[i]; 2016 if (rxbuf->m_head != NULL) 2017 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2018 2019 map = rxbuf->map; 2020 rxbuf->map = rxr->rx_sparemap; 2021 rxr->rx_sparemap = map; 2022 2023 rxbuf->m_head = m; 2024 rxbuf->paddr = seg.ds_addr; 2025 2026 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2027 return 0; 2028 } 2029 2030 static int 2031 igb_init_rx_ring(struct igb_rx_ring *rxr) 2032 { 2033 int i; 2034 2035 /* Clear the ring contents */ 2036 bzero(rxr->rx_base, 2037 rxr->sc->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2038 2039 /* Now replenish the ring mbufs */ 2040 for (i = 0; i < rxr->sc->num_rx_desc; ++i) { 2041 int error; 2042 2043 error = igb_newbuf(rxr, i, TRUE); 2044 if (error) 2045 return error; 2046 } 2047 2048 /* Setup our descriptor indices */ 2049 rxr->next_to_check = 0; 2050 2051 rxr->fmp = NULL; 2052 rxr->lmp = NULL; 2053 rxr->discard = FALSE; 2054 2055 return 0; 2056 } 2057 2058 static void 2059 igb_init_rx_unit(struct igb_softc *sc) 2060 { 2061 struct ifnet *ifp = &sc->arpcom.ac_if; 2062 struct e1000_hw *hw = &sc->hw; 2063 uint32_t rctl, rxcsum, srrctl = 0; 2064 int i; 2065 2066 /* 2067 * Make sure receives are disabled while setting 2068 * up the descriptor ring 2069 */ 2070 rctl = E1000_READ_REG(hw, E1000_RCTL); 2071 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2072 2073 #if 0 2074 /* 2075 ** Set up for header split 2076 */ 2077 if (igb_header_split) { 2078 /* Use a standard mbuf for the header */ 2079 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2080 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2081 } else 2082 #endif 2083 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2084 2085 /* 2086 ** Set up for jumbo frames 2087 */ 2088 if (ifp->if_mtu > ETHERMTU) { 2089 rctl |= E1000_RCTL_LPE; 2090 #if 0 2091 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { 2092 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2093 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; 2094 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { 2095 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2096 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; 2097 } 2098 /* Set maximum packet len */ 2099 psize = adapter->max_frame_size; 2100 /* are we on a vlan? 
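	 * (a configured VLAN trunk means frames may carry an
	 * extra 4-byte 802.1Q tag, so widen the packet limit)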
*/ 2101 if (adapter->ifp->if_vlantrunk != NULL) 2102 psize += VLAN_TAG_SIZE; 2103 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); 2104 #else 2105 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2106 rctl |= E1000_RCTL_SZ_2048; 2107 #endif 2108 } else { 2109 rctl &= ~E1000_RCTL_LPE; 2110 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2111 rctl |= E1000_RCTL_SZ_2048; 2112 } 2113 2114 /* Setup the Base and Length of the Rx Descriptor Rings */ 2115 for (i = 0; i < sc->num_queues; ++i) { 2116 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2117 uint64_t bus_addr = rxr->rxdma.dma_paddr; 2118 uint32_t rxdctl; 2119 2120 E1000_WRITE_REG(hw, E1000_RDLEN(i), 2121 sc->num_rx_desc * sizeof(struct e1000_rx_desc)); 2122 E1000_WRITE_REG(hw, E1000_RDBAH(i), 2123 (uint32_t)(bus_addr >> 32)); 2124 E1000_WRITE_REG(hw, E1000_RDBAL(i), 2125 (uint32_t)bus_addr); 2126 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); 2127 /* Enable this Queue */ 2128 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); 2129 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 2130 rxdctl &= 0xFFF00000; 2131 rxdctl |= IGB_RX_PTHRESH; 2132 rxdctl |= IGB_RX_HTHRESH << 8; 2133 rxdctl |= IGB_RX_WTHRESH << 16; 2134 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); 2135 } 2136 2137 /* 2138 * Setup for RX MultiQueue 2139 */ 2140 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); 2141 #if 0 2142 if (adapter->num_queues >1) { 2143 u32 random[10], mrqc, shift = 0; 2144 union igb_reta { 2145 u32 dword; 2146 u8 bytes[4]; 2147 } reta; 2148 2149 arc4rand(&random, sizeof(random), 0); 2150 if (adapter->hw.mac.type == e1000_82575) 2151 shift = 6; 2152 /* Warning FM follows */ 2153 for (int i = 0; i < 128; i++) { 2154 reta.bytes[i & 3] = 2155 (i % adapter->num_queues) << shift; 2156 if ((i & 3) == 3) 2157 E1000_WRITE_REG(hw, 2158 E1000_RETA(i >> 2), reta.dword); 2159 } 2160 /* Now fill in hash table */ 2161 mrqc = E1000_MRQC_ENABLE_RSS_4Q; 2162 for (int i = 0; i < 10; i++) 2163 E1000_WRITE_REG_ARRAY(hw, 2164 E1000_RSSRK(0), i, random[i]); 2165 2166 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | 2167 E1000_MRQC_RSS_FIELD_IPV4_TCP); 2168 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | 2169 E1000_MRQC_RSS_FIELD_IPV6_TCP); 2170 mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP | 2171 E1000_MRQC_RSS_FIELD_IPV6_UDP); 2172 mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | 2173 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); 2174 2175 E1000_WRITE_REG(hw, E1000_MRQC, mrqc); 2176 2177 /* 2178 ** NOTE: Receive Full-Packet Checksum Offload 2179 ** is mutually exclusive with Multiqueue. However 2180 ** this is not the same as TCP/IP checksums which 2181 ** still work. 2182 */ 2183 rxcsum |= E1000_RXCSUM_PCSD; 2184 } else 2185 #endif 2186 { 2187 /* Non RSS setup */ 2188 if (ifp->if_capenable & IFCAP_RXCSUM) 2189 rxcsum |= E1000_RXCSUM_IPPCSE; 2190 else 2191 rxcsum &= ~E1000_RXCSUM_TUOFL; 2192 } 2193 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); 2194 2195 /* Setup the Receive Control Register */ 2196 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2197 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2198 E1000_RCTL_RDMTS_HALF | 2199 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2200 /* Strip CRC bytes. 
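	 * SECRC makes the hardware drop the 4-byte Ethernet FCS
	 * before DMA, so received frame lengths exclude the CRC.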
*/ 2201 rctl |= E1000_RCTL_SECRC; 2202 /* Make sure VLAN Filters are off */ 2203 rctl &= ~E1000_RCTL_VFE; 2204 /* Don't store bad packets */ 2205 rctl &= ~E1000_RCTL_SBP; 2206 2207 /* Enable Receives */ 2208 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2209 2210 /* 2211 * Setup the HW Rx Head and Tail Descriptor Pointers 2212 * - needs to be after enable 2213 */ 2214 for (i = 0; i < sc->num_queues; ++i) { 2215 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2216 2217 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2218 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->sc->num_rx_desc - 1); 2219 } 2220 } 2221 2222 static void 2223 igb_rxeof(struct igb_rx_ring *rxr, int count) 2224 { 2225 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2226 union e1000_adv_rx_desc *cur; 2227 uint32_t staterr; 2228 int i; 2229 2230 i = rxr->next_to_check; 2231 cur = &rxr->rx_base[i]; 2232 staterr = le32toh(cur->wb.upper.status_error); 2233 2234 if ((staterr & E1000_RXD_STAT_DD) == 0) 2235 return; 2236 2237 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2238 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2239 struct mbuf *m = NULL; 2240 boolean_t eop; 2241 2242 eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE; 2243 if (eop) 2244 --count; 2245 2246 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2247 !rxr->discard) { 2248 struct mbuf *mp = rxbuf->m_head; 2249 uint16_t vlan; 2250 int len; 2251 2252 len = le16toh(cur->wb.upper.length); 2253 if (rxr->sc->hw.mac.type == e1000_i350 && 2254 (staterr & E1000_RXDEXT_STATERR_LB)) 2255 vlan = be16toh(cur->wb.upper.vlan); 2256 else 2257 vlan = le16toh(cur->wb.upper.vlan); 2258 2259 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2260 BUS_DMASYNC_POSTREAD); 2261 2262 if (igb_newbuf(rxr, i, FALSE) != 0) { 2263 ifp->if_iqdrops++; 2264 goto discard; 2265 } 2266 2267 mp->m_len = len; 2268 if (rxr->fmp == NULL) { 2269 mp->m_pkthdr.len = len; 2270 rxr->fmp = mp; 2271 rxr->lmp = mp; 2272 } else { 2273 rxr->lmp->m_next = mp; 2274 rxr->lmp = rxr->lmp->m_next; 2275 rxr->fmp->m_pkthdr.len += len; 2276 } 2277 2278 if (eop) { 2279 m = rxr->fmp; 2280 rxr->fmp = NULL; 2281 rxr->lmp = NULL; 2282 2283 m->m_pkthdr.rcvif = ifp; 2284 ifp->if_ipackets++; 2285 2286 if (ifp->if_capenable & IFCAP_RXCSUM) 2287 igb_rxcsum(staterr, m); 2288 2289 if (staterr & E1000_RXD_STAT_VP) { 2290 m->m_pkthdr.ether_vlantag = vlan; 2291 m->m_flags |= M_VLANTAG; 2292 } 2293 2294 #if 0 2295 if (ifp->if_capenable & IFCAP_RSS) { 2296 pi = emx_rssinfo(m, &pi0, mrq, 2297 rss_hash, staterr); 2298 } 2299 #endif 2300 } 2301 } else { 2302 ifp->if_ierrors++; 2303 discard: 2304 igb_setup_rxdesc(cur, rxbuf); 2305 if (!eop) 2306 rxr->discard = TRUE; 2307 else 2308 rxr->discard = FALSE; 2309 if (rxr->fmp != NULL) { 2310 m_freem(rxr->fmp); 2311 rxr->fmp = NULL; 2312 rxr->lmp = NULL; 2313 } 2314 m = NULL; 2315 } 2316 2317 if (m != NULL) 2318 ether_input_pkt(ifp, m, NULL); 2319 2320 /* Advance our pointers to the next descriptor. 
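	 * The RX ring is circular: past the last descriptor the
	 * index wraps back to 0.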
*/ 2321 if (++i == rxr->sc->num_rx_desc) 2322 i = 0; 2323 2324 cur = &rxr->rx_base[i]; 2325 staterr = le32toh(cur->wb.upper.status_error); 2326 } 2327 rxr->next_to_check = i; 2328 2329 if (--i < 0) 2330 i = rxr->sc->num_rx_desc - 1; 2331 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2332 } 2333 2334 2335 static void 2336 igb_set_vlan(struct igb_softc *sc) 2337 { 2338 struct e1000_hw *hw = &sc->hw; 2339 uint32_t reg; 2340 #if 0 2341 struct ifnet *ifp = sc->arpcom.ac_if; 2342 #endif 2343 2344 if (sc->vf_ifp) { 2345 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2346 return; 2347 } 2348 2349 reg = E1000_READ_REG(hw, E1000_CTRL); 2350 reg |= E1000_CTRL_VME; 2351 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2352 2353 #if 0 2354 /* Enable the Filter Table */ 2355 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2356 reg = E1000_READ_REG(hw, E1000_RCTL); 2357 reg &= ~E1000_RCTL_CFIEN; 2358 reg |= E1000_RCTL_VFE; 2359 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2360 } 2361 #endif 2362 2363 /* Update the frame size */ 2364 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2365 sc->max_frame_size + VLAN_TAG_SIZE); 2366 2367 #if 0 2368 /* Don't bother with table if no vlans */ 2369 if ((adapter->num_vlans == 0) || 2370 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2371 return; 2372 /* 2373 ** A soft reset zero's out the VFTA, so 2374 ** we need to repopulate it now. 2375 */ 2376 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2377 if (adapter->shadow_vfta[i] != 0) { 2378 if (adapter->vf_ifp) 2379 e1000_vfta_set_vf(hw, 2380 adapter->shadow_vfta[i], TRUE); 2381 else 2382 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2383 i, adapter->shadow_vfta[i]); 2384 } 2385 #endif 2386 } 2387 2388 static void 2389 igb_enable_intr(struct igb_softc *sc) 2390 { 2391 lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer); 2392 2393 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2394 /* XXX MSI-X should use sc->intr_mask */ 2395 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2396 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2397 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2398 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2399 } else { 2400 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2401 } 2402 E1000_WRITE_FLUSH(&sc->hw); 2403 } 2404 2405 static void 2406 igb_disable_intr(struct igb_softc *sc) 2407 { 2408 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2409 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2410 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2411 } 2412 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2413 E1000_WRITE_FLUSH(&sc->hw); 2414 2415 lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer); 2416 } 2417 2418 /* 2419 * Bit of a misnomer, what this really means is 2420 * to enable OS management of the system... aka 2421 * to disable special hardware management features 2422 */ 2423 static void 2424 igb_get_mgmt(struct igb_softc *sc) 2425 { 2426 if (sc->has_manage) { 2427 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 2428 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2429 2430 /* disable hardware interception of ARP */ 2431 manc &= ~E1000_MANC_ARP_EN; 2432 2433 /* enable receiving management packets to the host */ 2434 manc |= E1000_MANC_EN_MNG2HOST; 2435 manc2h |= 1 << 5; /* Mng Port 623 */ 2436 manc2h |= 1 << 6; /* Mng Port 664 */ 2437 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 2438 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2439 } 2440 } 2441 2442 /* 2443 * Give control back to hardware management controller 2444 * if there is one. 
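 * This is the inverse of igb_get_mgmt(): ARP interception is
 * handed back to the management firmware and management-to-host
 * packet forwarding is turned off.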
2445  */
2446 static void
2447 igb_rel_mgmt(struct igb_softc *sc)
2448 {
2449 	if (sc->has_manage) {
2450 		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2451 
2452 		/* Re-enable hardware interception of ARP */
2453 		manc |= E1000_MANC_ARP_EN;
2454 		manc &= ~E1000_MANC_EN_MNG2HOST;
2455 
2456 		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2457 	}
2458 }
2459 
2460 /*
2461  * Sets CTRL_EXT:DRV_LOAD bit.
2462  *
2463  * For ASF and Pass Through versions of f/w this means that
2464  * the driver is loaded.
2465  */
2466 static void
2467 igb_get_hw_control(struct igb_softc *sc)
2468 {
2469 	uint32_t ctrl_ext;
2470 
2471 	if (sc->vf_ifp)
2472 		return;
2473 
2474 	/* Let firmware know the driver has taken over */
2475 	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2476 	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2477 	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2478 }
2479 
2480 /*
2481  * Resets CTRL_EXT:DRV_LOAD bit.
2482  *
2483  * For ASF and Pass Through versions of f/w this means that the
2484  * driver is no longer loaded.
2485  */
2486 static void
2487 igb_rel_hw_control(struct igb_softc *sc)
2488 {
2489 	uint32_t ctrl_ext;
2490 
2491 	if (sc->vf_ifp)
2492 		return;
2493 
2494 	/* Let firmware take over control of h/w */
2495 	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2496 	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2497 	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2498 }
2499 
2500 static boolean_t
2501 igb_is_valid_ether_addr(const uint8_t *addr)
2502 {
2503 	uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2504 
2505 	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
2506 		return FALSE;
2507 	return TRUE;
2508 }
2509 
2510 /*
2511  * Enable PCI Wake On LAN capability
2512  */
2513 static void
2514 igb_enable_wol(device_t dev)
2515 {
2516 	uint16_t cap, status;
2517 	uint8_t id;
2518 
2519 	/* First find the capabilities pointer */
2520 	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
2521 
2522 	/* Read the PM Capabilities */
2523 	id = pci_read_config(dev, cap, 1);
2524 	if (id != PCIY_PMG)	/* Something is wrong */
2525 		return;
2526 
2527 	/*
2528 	 * OK, we have the power capabilities,
2529 	 * so now get the status register
2530 	 */
2531 	cap += PCIR_POWER_STATUS;
2532 	status = pci_read_config(dev, cap, 2);
2533 	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2534 	pci_write_config(dev, cap, status, 2);
2535 }
2536 
2537 static void
2538 igb_update_stats_counters(struct igb_softc *sc)
2539 {
2540 	struct e1000_hw *hw = &sc->hw;
2541 	struct e1000_hw_stats *stats;
2542 	struct ifnet *ifp = &sc->arpcom.ac_if;
2543 
2544 	/*
2545 	 * The virtual function adapter has only a small,
2546 	 * controlled set of stats, so update only those
2547 	 * and return.
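	 * (the VF counters are maintained by
	 * igb_update_vf_stats_counters() below)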
2548 */ 2549 if (sc->vf_ifp) { 2550 igb_update_vf_stats_counters(sc); 2551 return; 2552 } 2553 stats = sc->stats; 2554 2555 if (sc->hw.phy.media_type == e1000_media_type_copper || 2556 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 2557 stats->symerrs += 2558 E1000_READ_REG(hw,E1000_SYMERRS); 2559 stats->sec += E1000_READ_REG(hw, E1000_SEC); 2560 } 2561 2562 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 2563 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 2564 stats->scc += E1000_READ_REG(hw, E1000_SCC); 2565 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 2566 2567 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 2568 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 2569 stats->colc += E1000_READ_REG(hw, E1000_COLC); 2570 stats->dc += E1000_READ_REG(hw, E1000_DC); 2571 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 2572 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 2573 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 2574 2575 /* 2576 * For watchdog management we need to know if we have been 2577 * paused during the last interval, so capture that here. 2578 */ 2579 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 2580 stats->xoffrxc += sc->pause_frames; 2581 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 2582 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 2583 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 2584 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 2585 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 2586 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 2587 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 2588 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 2589 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 2590 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 2591 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 2592 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 2593 2594 /* For the 64-bit byte counters the low dword must be read first. 
*/ 2595 /* Both registers clear on the read of the high dword */ 2596 2597 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 2598 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 2599 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 2600 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 2601 2602 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 2603 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 2604 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 2605 stats->roc += E1000_READ_REG(hw, E1000_ROC); 2606 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 2607 2608 stats->tor += E1000_READ_REG(hw, E1000_TORH); 2609 stats->tot += E1000_READ_REG(hw, E1000_TOTH); 2610 2611 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 2612 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 2613 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 2614 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 2615 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 2616 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 2617 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 2618 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 2619 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 2620 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 2621 2622 /* Interrupt Counts */ 2623 2624 stats->iac += E1000_READ_REG(hw, E1000_IAC); 2625 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 2626 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 2627 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 2628 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 2629 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 2630 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 2631 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 2632 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 2633 2634 /* Host to Card Statistics */ 2635 2636 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 2637 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 2638 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 2639 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 2640 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 2641 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 2642 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 2643 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 2644 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 2645 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 2646 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 2647 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 2648 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 2649 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 2650 2651 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 2652 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 2653 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 2654 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 2655 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 2656 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 2657 2658 ifp->if_collisions = stats->colc; 2659 2660 /* Rx Errors */ 2661 ifp->if_ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc + 2662 stats->ruc + stats->roc + stats->mpc + stats->cexterr; 2663 2664 /* Tx Errors */ 2665 ifp->if_oerrors = stats->ecol + stats->latecol + sc->watchdog_events; 2666 2667 /* Driver specific counters */ 2668 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 2669 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 2670 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 2671 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 2672 sc->packet_buf_alloc_tx = 2673 ((E1000_READ_REG(hw, 
E1000_PBA) & 0xffff0000) >> 16); 2674 sc->packet_buf_alloc_rx = 2675 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 2676 } 2677 2678 static void 2679 igb_vf_init_stats(struct igb_softc *sc) 2680 { 2681 struct e1000_hw *hw = &sc->hw; 2682 struct e1000_vf_stats *stats; 2683 2684 stats = sc->stats; 2685 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 2686 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 2687 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 2688 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 2689 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 2690 } 2691 2692 static void 2693 igb_update_vf_stats_counters(struct igb_softc *sc) 2694 { 2695 struct e1000_hw *hw = &sc->hw; 2696 struct e1000_vf_stats *stats; 2697 2698 if (sc->link_speed == 0) 2699 return; 2700 2701 stats = sc->stats; 2702 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 2703 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 2704 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 2705 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 2706 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 2707 } 2708 2709 #ifdef DEVICE_POLLING 2710 2711 static void 2712 igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2713 { 2714 struct igb_softc *sc = ifp->if_softc; 2715 uint32_t reg_icr; 2716 2717 ASSERT_SERIALIZED(ifp->if_serializer); 2718 2719 switch (cmd) { 2720 case POLL_REGISTER: 2721 igb_disable_intr(sc); 2722 break; 2723 2724 case POLL_DEREGISTER: 2725 igb_enable_intr(sc); 2726 break; 2727 2728 case POLL_AND_CHECK_STATUS: 2729 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 2730 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 2731 sc->hw.mac.get_link_status = 1; 2732 igb_update_link_status(sc); 2733 } 2734 /* FALL THROUGH */ 2735 case POLL_ONLY: 2736 if (ifp->if_flags & IFF_RUNNING) { 2737 igb_rxeof(sc->queues[0].rxr, count); 2738 2739 igb_txeof(sc->queues[0].txr); 2740 if (!ifq_is_empty(&ifp->if_snd)) 2741 if_devstart(ifp); 2742 } 2743 break; 2744 } 2745 } 2746 2747 #endif /* DEVICE_POLLING */ 2748 2749 static void 2750 igb_intr(void *xsc) 2751 { 2752 struct igb_softc *sc = xsc; 2753 struct ifnet *ifp = &sc->arpcom.ac_if; 2754 uint32_t eicr; 2755 2756 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2757 2758 eicr = E1000_READ_REG(&sc->hw, E1000_EICR); 2759 2760 if (eicr == 0) 2761 return; 2762 2763 if (ifp->if_flags & IFF_RUNNING) { 2764 if (eicr & sc->rx_rings[0].rx_intr_mask) 2765 igb_rxeof(&sc->rx_rings[0], -1); 2766 2767 if (eicr & sc->tx_rings[0].tx_intr_mask) { 2768 igb_txeof(&sc->tx_rings[0]); 2769 if (!ifq_is_empty(&ifp->if_snd)) 2770 if_devstart(ifp); 2771 } 2772 } 2773 2774 if (eicr & E1000_EICR_OTHER) { 2775 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR); 2776 2777 /* Link status change */ 2778 if (icr & E1000_ICR_LSC) { 2779 sc->hw.mac.get_link_status = 1; 2780 igb_update_link_status(sc); 2781 } 2782 } 2783 2784 /* 2785 * Reading EICR has the side effect to clear interrupt mask, 2786 * so all interrupts need to be enabled here. 2787 */ 2788 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2789 } 2790 2791 static void 2792 igb_shared_intr(void *xsc) 2793 { 2794 struct igb_softc *sc = xsc; 2795 struct ifnet *ifp = &sc->arpcom.ac_if; 2796 uint32_t reg_icr; 2797 2798 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2799 2800 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 2801 2802 /* Hot eject? */ 2803 if (reg_icr == 0xffffffff) 2804 return; 2805 2806 /* Definitely not our interrupt. 
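	 * On a shared line, ICR == 0 simply means some other
	 * device on this IRQ raised the interrupt.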
*/ 2807 if (reg_icr == 0x0) 2808 return; 2809 2810 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) 2811 return; 2812 2813 if (ifp->if_flags & IFF_RUNNING) { 2814 igb_rxeof(sc->queues[0].rxr, -1); 2815 2816 igb_txeof(sc->queues[0].txr); 2817 if (!ifq_is_empty(&ifp->if_snd)) 2818 if_devstart(ifp); 2819 } 2820 2821 /* Link status change */ 2822 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 2823 sc->hw.mac.get_link_status = 1; 2824 igb_update_link_status(sc); 2825 } 2826 2827 if (reg_icr & E1000_ICR_RXO) 2828 sc->rx_overruns++; 2829 } 2830 2831 static int 2832 igb_txctx_pullup(struct igb_tx_ring *txr, struct mbuf **m0) 2833 { 2834 struct mbuf *m = *m0; 2835 struct ether_header *eh; 2836 int len; 2837 2838 txr->ctx_try_pullup++; 2839 2840 len = ETHER_HDR_LEN + IGB_IPVHL_SIZE; 2841 2842 if (__predict_false(!M_WRITABLE(m))) { 2843 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2844 txr->ctx_drop1++; 2845 m_freem(m); 2846 *m0 = NULL; 2847 return ENOBUFS; 2848 } 2849 eh = mtod(m, struct ether_header *); 2850 2851 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2852 len += EVL_ENCAPLEN; 2853 2854 if (m->m_len < len) { 2855 txr->ctx_drop2++; 2856 m_freem(m); 2857 *m0 = NULL; 2858 return ENOBUFS; 2859 } 2860 return 0; 2861 } 2862 2863 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2864 txr->ctx_pullup1++; 2865 m = m_pullup(m, ETHER_HDR_LEN); 2866 if (m == NULL) { 2867 txr->ctx_pullup1_failed++; 2868 *m0 = NULL; 2869 return ENOBUFS; 2870 } 2871 *m0 = m; 2872 } 2873 eh = mtod(m, struct ether_header *); 2874 2875 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2876 len += EVL_ENCAPLEN; 2877 2878 if (m->m_len < len) { 2879 txr->ctx_pullup2++; 2880 m = m_pullup(m, len); 2881 if (m == NULL) { 2882 txr->ctx_pullup2_failed++; 2883 *m0 = NULL; 2884 return ENOBUFS; 2885 } 2886 *m0 = m; 2887 } 2888 return 0; 2889 } 2890 2891 static int 2892 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp) 2893 { 2894 bus_dma_segment_t segs[IGB_MAX_SCATTER]; 2895 bus_dmamap_t map; 2896 struct igb_tx_buf *tx_buf, *tx_buf_mapped; 2897 union e1000_adv_tx_desc *txd = NULL; 2898 struct mbuf *m_head = *m_headp; 2899 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0; 2900 int maxsegs, nsegs, i, j, error, last = 0; 2901 uint32_t hdrlen = 0; 2902 2903 if (m_head->m_len < IGB_TXCSUM_MINHL && 2904 ((m_head->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) || 2905 (m_head->m_flags & M_VLANTAG))) { 2906 /* 2907 * Make sure that ethernet header and ip.ip_hl are in 2908 * contiguous memory, since if TXCSUM or VLANTAG is 2909 * enabled, later TX context descriptor's setup need 2910 * to access ip.ip_hl. 2911 */ 2912 error = igb_txctx_pullup(txr, m_headp); 2913 if (error) { 2914 KKASSERT(*m_headp == NULL); 2915 return error; 2916 } 2917 m_head = *m_headp; 2918 } 2919 2920 /* Set basic descriptor constants */ 2921 cmd_type_len |= E1000_ADVTXD_DTYP_DATA; 2922 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; 2923 if (m_head->m_flags & M_VLANTAG) 2924 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 2925 2926 /* 2927 * Map the packet for DMA. 
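	 * bus_dmamap_load_mbuf_defrag() may defragment the chain
	 * if it needs more than maxsegs segments, in which case
	 * *m_headp is replaced with the new chain.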
2928 */ 2929 tx_buf = &txr->tx_buf[txr->next_avail_desc]; 2930 tx_buf_mapped = tx_buf; 2931 map = tx_buf->map; 2932 2933 maxsegs = txr->tx_avail - IGB_TX_RESERVED; 2934 KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n")); 2935 if (maxsegs > IGB_MAX_SCATTER) 2936 maxsegs = IGB_MAX_SCATTER; 2937 2938 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp, 2939 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 2940 if (error) { 2941 if (error == ENOBUFS) 2942 txr->sc->mbuf_defrag_failed++; 2943 else 2944 txr->sc->no_tx_dma_setup++; 2945 2946 m_freem(*m_headp); 2947 *m_headp = NULL; 2948 return error; 2949 } 2950 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE); 2951 2952 m_head = *m_headp; 2953 2954 #if 0 2955 /* 2956 * Set up the context descriptor: 2957 * used when any hardware offload is done. 2958 * This includes CSUM, VLAN, and TSO. It 2959 * will use the first descriptor. 2960 */ 2961 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 2962 if (igb_tso_setup(txr, m_head, &hdrlen)) { 2963 cmd_type_len |= E1000_ADVTXD_DCMD_TSE; 2964 olinfo_status |= E1000_TXD_POPTS_IXSM << 8; 2965 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 2966 } else 2967 return (ENXIO); 2968 } else if (igb_tx_ctx_setup(txr, m_head)) 2969 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 2970 #else 2971 if (igb_txctx(txr, m_head)) { 2972 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8); 2973 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) 2974 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8); 2975 txr->tx_nsegs++; 2976 } 2977 #endif 2978 2979 txr->tx_nsegs += nsegs; 2980 if (txr->tx_nsegs >= txr->intr_nsegs) { 2981 /* 2982 * Report Status (RS) is turned on every intr_nsegs 2983 * descriptors (roughly). 2984 */ 2985 txr->tx_nsegs = 0; 2986 cmd_rs = E1000_ADVTXD_DCMD_RS; 2987 } 2988 2989 /* Calculate payload length */ 2990 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) 2991 << E1000_ADVTXD_PAYLEN_SHIFT); 2992 2993 /* 82575 needs the queue index added */ 2994 if (txr->sc->hw.mac.type == e1000_82575) 2995 olinfo_status |= txr->me << 4; 2996 2997 /* Set up our transmit descriptors */ 2998 i = txr->next_avail_desc; 2999 for (j = 0; j < nsegs; j++) { 3000 bus_size_t seg_len; 3001 bus_addr_t seg_addr; 3002 3003 tx_buf = &txr->tx_buf[i]; 3004 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; 3005 seg_addr = segs[j].ds_addr; 3006 seg_len = segs[j].ds_len; 3007 3008 txd->read.buffer_addr = htole64(seg_addr); 3009 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); 3010 txd->read.olinfo_status = htole32(olinfo_status); 3011 last = i; 3012 if (++i == txr->sc->num_tx_desc) 3013 i = 0; 3014 tx_buf->m_head = NULL; 3015 } 3016 3017 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n")); 3018 txr->next_avail_desc = i; 3019 txr->tx_avail -= nsegs; 3020 3021 tx_buf->m_head = m_head; 3022 tx_buf_mapped->map = tx_buf->map; 3023 tx_buf->map = map; 3024 3025 /* 3026 * Last Descriptor of Packet needs End Of Packet (EOP) 3027 */ 3028 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs); 3029 3030 /* 3031 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 3032 * that this frame is available to transmit. 
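	 * TDT points one entry past the last valid descriptor;
	 * hardware owns the ring from TDH up to, but not
	 * including, TDT.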
3033 	 */
3034 	E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), i);
3035 	++txr->tx_packets;
3036 
3037 	return 0;
3038 }
3039 
3040 static void
3041 igb_start(struct ifnet *ifp)
3042 {
3043 	struct igb_softc *sc = ifp->if_softc;
3044 	struct igb_tx_ring *txr = sc->queues[0].txr;
3045 	struct mbuf *m_head;
3046 
3047 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3048 
3049 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3050 		return;
3051 
3052 	if (!sc->link_active) {
3053 		ifq_purge(&ifp->if_snd);
3054 		return;
3055 	}
3056 
3057 	if (!IGB_IS_NOT_OACTIVE(txr))
3058 		igb_txeof(txr);
3059 
3060 	while (!ifq_is_empty(&ifp->if_snd)) {
3061 		if (IGB_IS_OACTIVE(txr)) {
3062 			ifp->if_flags |= IFF_OACTIVE;
3063 			/* Set watchdog on */
3064 			ifp->if_timer = 5;
3065 			break;
3066 		}
3067 
3068 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
3069 		if (m_head == NULL)
3070 			break;
3071 
3072 		if (igb_encap(txr, &m_head)) {
3073 			ifp->if_oerrors++;
3074 			continue;
3075 		}
3076 
3077 		/* Send a copy of the frame to the BPF listener */
3078 		ETHER_BPF_MTAP(ifp, m_head);
3079 	}
3080 }
3081 
3082 static void
3083 igb_watchdog(struct ifnet *ifp)
3084 {
3085 	struct igb_softc *sc = ifp->if_softc;
3086 	struct igb_tx_ring *txr = sc->queues[0].txr;
3087 
3088 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3089 
3090 	/*
3091 	 * If flow control has paused us since we last checked,
3092 	 * it invalidates the watchdog timing, so don't run it.
3093 	 */
3094 	if (sc->pause_frames) {
3095 		sc->pause_frames = 0;
3096 		ifp->if_timer = 5;
3097 		return;
3098 	}
3099 
3100 	if_printf(ifp, "Watchdog timeout -- resetting\n");
3101 	if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
3102 	    E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
3103 	    E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
3104 	if_printf(ifp, "TX(%d) desc avail = %d, "
3105 	    "Next TX to Clean = %d\n",
3106 	    txr->me, txr->tx_avail, txr->next_to_clean);
3107 
3108 	ifp->if_oerrors++;
3109 	sc->watchdog_events++;
3110 
3111 	igb_init(sc);
3112 	if (!ifq_is_empty(&ifp->if_snd))
3113 		if_devstart(ifp);
3114 }
3115 
3116 static void
3117 igb_set_eitr(struct igb_softc *sc)
3118 {
3119 	uint32_t itr = 0;
3120 
3121 	if (sc->intr_rate > 0) {
3122 		if (sc->hw.mac.type == e1000_82575) {
3123 			itr = 1000000000 / 256 / sc->intr_rate;
3124 			/*
3125 			 * NOTE:
3126 			 * The datasheet's 2-bit left shift is wrong here and must not be applied
3127 			 */
3128 		} else {
3129 			itr = 1000000 / sc->intr_rate;
3130 			itr <<= 2;
3131 		}
3132 		itr &= 0x7FFC;
3133 	}
3134 	if (sc->hw.mac.type == e1000_82575)
3135 		itr |= itr << 16;
3136 	else
3137 		itr |= E1000_EITR_CNT_IGNR;
3138 	E1000_WRITE_REG(&sc->hw, E1000_EITR(0), itr);
3139 }
3140 
3141 static int
3142 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
3143 {
3144 	struct igb_softc *sc = (void *)arg1;
3145 	struct ifnet *ifp = &sc->arpcom.ac_if;
3146 	int error, intr_rate;
3147 
3148 	intr_rate = sc->intr_rate;
3149 	error = sysctl_handle_int(oidp, &intr_rate, 0, req);
3150 	if (error || req->newptr == NULL)
3151 		return error;
3152 	if (intr_rate < 0)
3153 		return EINVAL;
3154 
3155 	ifnet_serialize_all(ifp);
3156 
3157 	sc->intr_rate = intr_rate;
3158 	if (ifp->if_flags & IFF_RUNNING)
3159 		igb_set_eitr(sc);
3160 
3161 	ifnet_deserialize_all(ifp);
3162 
3163 	if (bootverbose)
3164 		if_printf(ifp, "Interrupt rate set to %d/sec\n", sc->intr_rate);
3165 	return 0;
3166 }
3167 
3168 static int
3169 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
3170 {
3171 	struct igb_softc *sc = (void *)arg1;
3172 	struct ifnet *ifp = &sc->arpcom.ac_if;
3173 	struct igb_tx_ring *txr = sc->queues[0].txr;
3174 	int error, nsegs;
3175 
3176 	nsegs = txr->intr_nsegs;
3177 	error = sysctl_handle_int(oidp, &nsegs, 0, req);
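	/* newptr == NULL means this was a read; only act on writes */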
3178 if (error || req->newptr == NULL) 3179 return error; 3180 if (nsegs <= 0) 3181 return EINVAL; 3182 3183 ifnet_serialize_all(ifp); 3184 3185 if (nsegs >= sc->num_tx_desc - txr->oact_lo_desc || 3186 nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) { 3187 error = EINVAL; 3188 } else { 3189 error = 0; 3190 txr->intr_nsegs = nsegs; 3191 } 3192 3193 ifnet_deserialize_all(ifp); 3194 3195 return error; 3196 } 3197 3198 static void 3199 igb_init_intr(struct igb_softc *sc) 3200 { 3201 if (sc->flags & IGB_FLAG_SHARED_INTR) 3202 igb_set_eitr(sc); 3203 else 3204 igb_init_unshared_intr(sc); 3205 } 3206 3207 static void 3208 igb_init_unshared_intr(struct igb_softc *sc) 3209 { 3210 struct e1000_hw *hw = &sc->hw; 3211 const struct igb_rx_ring *rxr; 3212 const struct igb_tx_ring *txr; 3213 uint32_t ivar, index; 3214 int i; 3215 3216 /* 3217 * Enable extended mode 3218 */ 3219 if (sc->hw.mac.type != e1000_82575) { 3220 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_NSICR); 3221 } else { 3222 uint32_t tmp; 3223 3224 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 3225 tmp |= E1000_CTRL_EXT_IRCA; 3226 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 3227 } 3228 3229 /* 3230 * Map TX/RX interrupts to EICR 3231 */ 3232 switch (sc->hw.mac.type) { 3233 case e1000_82580: 3234 case e1000_i350: 3235 case e1000_vfadapt: 3236 case e1000_vfadapt_i350: 3237 /* RX entries */ 3238 for (i = 0; i < sc->num_queues; ++i) { 3239 rxr = &sc->rx_rings[i]; 3240 3241 index = i >> 1; 3242 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3243 3244 if (i & 1) { 3245 ivar &= 0xff00ffff; 3246 ivar |= 3247 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16; 3248 } else { 3249 ivar &= 0xffffff00; 3250 ivar |= 3251 (rxr->rx_intr_bit | E1000_IVAR_VALID); 3252 } 3253 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3254 } 3255 /* TX entries */ 3256 for (i = 0; i < sc->num_queues; ++i) { 3257 txr = &sc->tx_rings[i]; 3258 3259 index = i >> 1; 3260 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3261 3262 if (i & 1) { 3263 ivar &= 0x00ffffff; 3264 ivar |= 3265 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24; 3266 } else { 3267 ivar &= 0xffff00ff; 3268 ivar |= 3269 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8; 3270 } 3271 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3272 } 3273 /* Clear unused IVAR_MISC */ 3274 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3275 break; 3276 3277 case e1000_82576: 3278 /* RX entries */ 3279 for (i = 0; i < sc->num_queues; ++i) { 3280 rxr = &sc->rx_rings[i]; 3281 3282 index = i & 0x7; /* Each IVAR has two entries */ 3283 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3284 3285 if (i < 8) { 3286 ivar &= 0xffffff00; 3287 ivar |= 3288 (rxr->rx_intr_bit | E1000_IVAR_VALID); 3289 } else { 3290 ivar &= 0xff00ffff; 3291 ivar |= 3292 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16; 3293 } 3294 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3295 } 3296 /* TX entries */ 3297 for (i = 0; i < sc->num_queues; ++i) { 3298 txr = &sc->tx_rings[i]; 3299 3300 index = i & 0x7; /* Each IVAR has two entries */ 3301 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3302 3303 if (i < 8) { 3304 ivar &= 0xffff00ff; 3305 ivar |= 3306 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8; 3307 } else { 3308 ivar &= 0x00ffffff; 3309 ivar |= 3310 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24; 3311 } 3312 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3313 } 3314 /* Clear unused IVAR_MISC */ 3315 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3316 break; 3317 3318 case e1000_82575: 3319 /* 3320 * Enable necessary interrupt bits. 
3321 * 3322 * The name of the register is confusing; in addition to 3323 * configuring the first vector of MSI-X, it also configures 3324 * which bits of EICR could be set by the hardware even when 3325 * MSI or line interrupt is used; it thus controls interrupt 3326 * generation. It MUST be configured explicitly; the default 3327 * value mentioned in the datasheet is wrong: RX queue0 and 3328 * TX queue0 are NOT enabled by default. 3329 */ 3330 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask); 3331 break; 3332 3333 default: 3334 break; 3335 } 3336 3337 /* 3338 * Configure interrupt moderation 3339 */ 3340 igb_set_eitr(sc); 3341 } 3342 3343 static int 3344 igb_setup_intr(struct igb_softc *sc) 3345 { 3346 struct ifnet *ifp = &sc->arpcom.ac_if; 3347 int error, i; 3348 3349 /* 3350 * Setup interrupt mask 3351 */ 3352 for (i = 0; i < sc->num_queues; ++i) 3353 igb_setup_tx_intr(&sc->tx_rings[i]); 3354 for (i = 0; i < sc->num_queues; ++i) 3355 igb_setup_rx_intr(&sc->rx_rings[i]); 3356 3357 sc->intr_mask = E1000_EICR_OTHER; 3358 for (i = 0; i < sc->num_queues; ++i) 3359 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask; 3360 for (i = 0; i < sc->num_queues; ++i) 3361 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask; 3362 3363 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 3364 int unshared; 3365 3366 unshared = device_getenv_int(sc->dev, "irq.unshared", 0); 3367 if (!unshared) { 3368 sc->flags |= IGB_FLAG_SHARED_INTR; 3369 if (bootverbose) 3370 device_printf(sc->dev, "IRQ shared\n"); 3371 } else if (bootverbose) { 3372 device_printf(sc->dev, "IRQ unshared\n"); 3373 } 3374 } 3375 3376 error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE, 3377 (sc->flags & IGB_FLAG_SHARED_INTR) ? igb_shared_intr : igb_intr, 3378 sc, &sc->intr_tag, ifp->if_serializer); 3379 if (error) { 3380 device_printf(sc->dev, "Failed to register interrupt handler"); 3381 return error; 3382 } 3383 3384 ifp->if_cpuid = rman_get_cpuid(sc->intr_res); 3385 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 3386 3387 return 0; 3388 } 3389 3390 static void 3391 igb_setup_tx_intr(struct igb_tx_ring *txr) 3392 { 3393 if (txr->sc->hw.mac.type == e1000_82575) { 3394 txr->tx_intr_bit = 0; /* unused */ 3395 switch (txr->me) { 3396 case 0: 3397 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0; 3398 break; 3399 case 1: 3400 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1; 3401 break; 3402 case 2: 3403 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2; 3404 break; 3405 case 3: 3406 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3; 3407 break; 3408 default: 3409 panic("unsupported # of TX ring, %d\n", txr->me); 3410 } 3411 } else { 3412 txr->tx_intr_bit = 0; /* XXX */ 3413 txr->tx_intr_mask = 1 << txr->tx_intr_bit; 3414 } 3415 } 3416 3417 static void 3418 igb_setup_rx_intr(struct igb_rx_ring *rxr) 3419 { 3420 if (rxr->sc->hw.mac.type == e1000_82575) { 3421 rxr->rx_intr_bit = 0; /* unused */ 3422 switch (rxr->me) { 3423 case 0: 3424 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0; 3425 break; 3426 case 1: 3427 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1; 3428 break; 3429 case 2: 3430 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2; 3431 break; 3432 case 3: 3433 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3; 3434 break; 3435 default: 3436 panic("unsupported # of RX ring, %d\n", rxr->me); 3437 } 3438 } else { 3439 rxr->rx_intr_bit = 1; /* XXX */ 3440 rxr->rx_intr_mask = 1 << rxr->rx_intr_bit; 3441 } 3442 } 3443