/*
 * Copyright (c) 2001-2013, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_ix.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ix/ixgbe_api.h>
#include <dev/netif/ix/if_ix.h>

#ifdef IX_RSS_DEBUG
#define IX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
    if (sc->rss_debug >= lvl) \
        if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else   /* !IX_RSS_DEBUG */
#define IX_RSS_DPRINTF(sc, lvl, fmt, ...)   ((void)0)
#endif  /* IX_RSS_DEBUG */

#define IX_NAME         "Intel(R) PRO/10GbE "
#define IX_DEVICE(id) \
    { IXGBE_VENDOR_ID, IXGBE_DEV_ID_##id, IX_NAME #id }
#define IX_DEVICE_NULL  { 0, 0, NULL }

static struct ix_device {
    uint16_t    vid;
    uint16_t    did;
    const char  *desc;
} ix_devices[] = {
    IX_DEVICE(82598AF_DUAL_PORT),
    IX_DEVICE(82598AF_SINGLE_PORT),
    IX_DEVICE(82598EB_CX4),
    IX_DEVICE(82598AT),
    IX_DEVICE(82598AT2),
    IX_DEVICE(82598),
    IX_DEVICE(82598_DA_DUAL_PORT),
    IX_DEVICE(82598_CX4_DUAL_PORT),
    IX_DEVICE(82598EB_XF_LR),
    IX_DEVICE(82598_SR_DUAL_PORT_EM),
    IX_DEVICE(82598EB_SFP_LOM),
    IX_DEVICE(82599_KX4),
    IX_DEVICE(82599_KX4_MEZZ),
    IX_DEVICE(82599_SFP),
    IX_DEVICE(82599_XAUI_LOM),
    IX_DEVICE(82599_CX4),
    IX_DEVICE(82599_T3_LOM),
    IX_DEVICE(82599_COMBO_BACKPLANE),
    IX_DEVICE(82599_BACKPLANE_FCOE),
    IX_DEVICE(82599_SFP_SF2),
    IX_DEVICE(82599_SFP_FCOE),
    IX_DEVICE(82599EN_SFP),
    IX_DEVICE(82599_SFP_SF_QP),
    IX_DEVICE(X540T),

    /* required last entry */
    IX_DEVICE_NULL
};

static int      ix_probe(device_t);
static int      ix_attach(device_t);
static int      ix_detach(device_t);
static int      ix_shutdown(device_t);

static void     ix_serialize(struct ifnet *, enum ifnet_serialize);
static void     ix_deserialize(struct ifnet *, enum ifnet_serialize);
static int      ix_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void     ix_serialize_assert(struct ifnet *, enum ifnet_serialize,
                    boolean_t);
#endif
static void     ix_start(struct ifnet *, struct ifaltq_subque *);
static void     ix_watchdog(struct ifaltq_subque *);
static int      ix_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     ix_init(void *);
static void     ix_stop(struct ix_softc *);
static void     ix_media_status(struct ifnet *, struct ifmediareq *);
static int      ix_media_change(struct ifnet *);
static void     ix_timer(void *);
#ifdef IFPOLL_ENABLE
static void     ix_npoll(struct ifnet *, struct ifpoll_info *);
static void     ix_npoll_rx(struct ifnet *, void *, int);
static void     ix_npoll_tx(struct ifnet *, void *, int);
static void     ix_npoll_status(struct ifnet *);
#endif

static void     ix_add_sysctl(struct ix_softc *);
static void     ix_add_intr_rate_sysctl(struct ix_softc *, int,
                    const char *, int (*)(SYSCTL_HANDLER_ARGS), const char *);
static int      ix_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_txd(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_rxd(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_intr_rate(SYSCTL_HANDLER_ARGS, int);
static int      ix_sysctl_rxtx_intr_rate(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_rx_intr_rate(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_tx_intr_rate(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_sts_intr_rate(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_flowctrl(SYSCTL_HANDLER_ARGS);
#ifdef foo
static int      ix_sysctl_advspeed(SYSCTL_HANDLER_ARGS);
#endif
#if 0
static void     ix_add_hw_stats(struct ix_softc *);
#endif
#ifdef IFPOLL_ENABLE
static int      ix_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int      ix_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif

static void     ix_slot_info(struct ix_softc *);
static int      ix_alloc_rings(struct ix_softc *);
static void     ix_free_rings(struct ix_softc *);
static void     ix_setup_ifp(struct ix_softc *);
static void     ix_setup_serialize(struct ix_softc *);
static void     ix_set_ring_inuse(struct ix_softc *, boolean_t);
static void     ix_set_timer_cpuid(struct ix_softc *, boolean_t);
static void     ix_update_stats(struct ix_softc *);

static void     ix_set_promisc(struct ix_softc *);
static void     ix_set_multi(struct ix_softc *);
static void     ix_set_vlan(struct ix_softc *);
static uint8_t  *ix_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);

static int      ix_get_txring_inuse(const struct ix_softc *, boolean_t);
static void     ix_init_tx_ring(struct ix_tx_ring *);
static void     ix_free_tx_ring(struct ix_tx_ring *);
static int      ix_create_tx_ring(struct ix_tx_ring *);
static void     ix_destroy_tx_ring(struct ix_tx_ring *, int);
static void     ix_init_tx_unit(struct ix_softc *);
static int      ix_encap(struct ix_tx_ring *, struct mbuf **,
                    uint16_t *, int *);
static int      ix_tx_ctx_setup(struct ix_tx_ring *,
                    const struct mbuf *, uint32_t *, uint32_t *);
static int      ix_tso_ctx_setup(struct ix_tx_ring *,
                    const struct mbuf *, uint32_t *, uint32_t *);
static void     ix_txeof(struct ix_tx_ring *, int);

static int      ix_get_rxring_inuse(const struct ix_softc *, boolean_t);
static int      ix_init_rx_ring(struct ix_rx_ring *);
static void     ix_free_rx_ring(struct ix_rx_ring *);
static int      ix_create_rx_ring(struct ix_rx_ring *);
static void     ix_destroy_rx_ring(struct ix_rx_ring *, int);
static void     ix_init_rx_unit(struct ix_softc *);
#if 0
static void     ix_setup_hw_rsc(struct ix_rx_ring *);
#endif
static int      ix_newbuf(struct ix_rx_ring *, int, boolean_t);
static void     ix_rxeof(struct ix_rx_ring *, int);
static void     ix_rx_discard(struct ix_rx_ring *, int, boolean_t);
static void     ix_enable_rx_drop(struct ix_softc *);
static void     ix_disable_rx_drop(struct ix_softc *);

static void     ix_alloc_msix(struct ix_softc *);
static void     ix_free_msix(struct ix_softc *, boolean_t);
static void     ix_conf_rx_msix(struct ix_softc *, int, int *, int);
static void     ix_conf_tx_msix(struct ix_softc *, int, int *, int);
static void     ix_setup_msix_eims(const struct ix_softc *, int,
                    uint32_t *, uint32_t *);
static int      ix_alloc_intr(struct ix_softc *);
static void     ix_free_intr(struct ix_softc *);
static int      ix_setup_intr(struct ix_softc *);
static void     ix_teardown_intr(struct ix_softc *, int);
static void     ix_enable_intr(struct ix_softc *);
static void     ix_disable_intr(struct ix_softc *);
static void     ix_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
static void     ix_set_eitr(struct ix_softc *, int, int);
static void     ix_intr_status(struct ix_softc *, uint32_t);
static void     ix_intr(void *);
static void     ix_msix_rxtx(void *);
static void     ix_msix_rx(void *);
static void     ix_msix_tx(void *);
static void     ix_msix_status(void *);

static void     ix_config_link(struct ix_softc *);
static boolean_t ix_sfp_probe(struct ix_softc *);
static boolean_t ix_is_sfp(const struct ixgbe_hw *);
static void     ix_setup_optics(struct ix_softc *);
static void     ix_update_link_status(struct ix_softc *);
static void     ix_handle_link(struct ix_softc *);
static void     ix_handle_mod(struct ix_softc *);
static void     ix_handle_msf(struct ix_softc *);

/* XXX Shared code structure requires this for the moment */
extern void     ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *);
static device_method_t ix_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     ix_probe),
    DEVMETHOD(device_attach,    ix_attach),
    DEVMETHOD(device_detach,    ix_detach),
    DEVMETHOD(device_shutdown,  ix_shutdown),
    DEVMETHOD_END
};

static driver_t ix_driver = {
    "ix",
    ix_methods,
    sizeof(struct ix_softc)
};

static devclass_t ix_devclass;

DECLARE_DUMMY_MODULE(if_ix);
DRIVER_MODULE(if_ix, pci, ix_driver, ix_devclass, NULL, NULL);

static int  ix_msi_enable = 1;
static int  ix_msix_enable = 1;
static int  ix_msix_agg_rxtx = 1;
static int  ix_rxr = 0;
static int  ix_txr = 0;
static int  ix_txd = IX_PERF_TXD;
static int  ix_rxd = IX_PERF_RXD;
static int  ix_unsupported_sfp = 0;

TUNABLE_INT("hw.ix.msi.enable", &ix_msi_enable);
TUNABLE_INT("hw.ix.msix.enable", &ix_msix_enable);
TUNABLE_INT("hw.ix.msix.agg_rxtx", &ix_msix_agg_rxtx);
TUNABLE_INT("hw.ix.rxr", &ix_rxr);
TUNABLE_INT("hw.ix.txr", &ix_txr);
TUNABLE_INT("hw.ix.txd", &ix_txd);
TUNABLE_INT("hw.ix.rxd", &ix_rxd);
TUNABLE_INT("hw.ix.unsupported_sfp", &ix_unsupported_sfp);

/*
 * Smart speed setting, default to on.  This only works as a compile
 * option right now, since it is set during attach; set this to
 * 'ixgbe_smart_speed_off' to disable.
 */
static const enum ixgbe_smart_speed ix_smart_speed =
    ixgbe_smart_speed_on;
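
/*
 * The variables above are boot-time tunables.  As an illustrative
 * sketch (the names come from the TUNABLE_INT() calls above; the
 * values below are made up), they could be set from /boot/loader.conf:
 *
 *     hw.ix.msix.enable="0"    # fall back to MSI/legacy interrupts
 *     hw.ix.rxr="4"            # number of RX rings (0 = default sizing)
 *     hw.ix.txd="2048"         # TX descriptors per ring
 *
 * The per-device "rxr", "txr", "txd", "rxd" and "npoll.*" knobs are
 * additionally looked up through device_getenv_int() during attach and
 * ring creation.
 */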

static int
ix_probe(device_t dev)
{
    const struct ix_device *d;
    uint16_t vid, did;

    vid = pci_get_vendor(dev);
    did = pci_get_device(dev);

    for (d = ix_devices; d->desc != NULL; ++d) {
        if (vid == d->vid && did == d->did) {
            device_set_desc(dev, d->desc);
            return 0;
        }
    }
    return ENXIO;
}

static int
ix_attach(device_t dev)
{
    struct ix_softc *sc = device_get_softc(dev);
    struct ixgbe_hw *hw;
    int error, ring_cnt_max;
    uint16_t csum;
    uint32_t ctrl_ext;
#ifdef IFPOLL_ENABLE
    int offset, offset_def;
#endif

    sc->dev = sc->osdep.dev = dev;
    hw = &sc->hw;

    if_initname(&sc->arpcom.ac_if, device_get_name(dev),
        device_get_unit(dev));
    ifmedia_init(&sc->media, IFM_IMASK,
        ix_media_change, ix_media_status);

    /* Save frame size */
    sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

    callout_init_mp(&sc->timer);
    lwkt_serialize_init(&sc->main_serialize);

    /*
     * Save off the information about this board
     */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
    hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
    hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

    ixgbe_set_mac_type(hw);

    /* Pick up the 82599 and VF settings */
    if (hw->mac.type != ixgbe_mac_82598EB)
        hw->phy.smart_speed = ix_smart_speed;

    /* Enable bus mastering */
    pci_enable_busmaster(dev);

    /*
     * Allocate IO memory
     */
    sc->mem_rid = PCIR_BAR(0);
    sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->mem_rid, RF_ACTIVE);
    if (sc->mem_res == NULL) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        error = ENXIO;
        goto failed;
    }

    sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
    sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

    sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;
    sc->hw.back = &sc->osdep;

    /*
     * Configure total supported RX/TX ring count
     */
    sc->rx_ring_cnt = device_getenv_int(dev, "rxr", ix_rxr);
    sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, IX_MAX_RXRING);
    sc->rx_ring_inuse = sc->rx_ring_cnt;

    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        ring_cnt_max = IX_MAX_TXRING_82598;
        break;

    case ixgbe_mac_82599EB:
        ring_cnt_max = IX_MAX_TXRING_82599;
        break;

    case ixgbe_mac_X540:
        ring_cnt_max = IX_MAX_TXRING_X540;
        break;

    default:
        ring_cnt_max = 1;
        break;
    }
    sc->tx_ring_cnt = device_getenv_int(dev, "txr", ix_txr);
    sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_cnt_max);
    sc->tx_ring_inuse = sc->tx_ring_cnt;

    /* Allocate TX/RX rings */
    error = ix_alloc_rings(sc);
    if (error)
        goto failed;

#ifdef IFPOLL_ENABLE
    /*
     * NPOLLING RX CPU offset
     */
    if (sc->rx_ring_cnt == ncpus2) {
        offset = 0;
    } else {
        offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
        offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
        if (offset >= ncpus2 ||
            offset % sc->rx_ring_cnt != 0) {
            device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
                offset, offset_def);
            offset = offset_def;
        }
    }
    sc->rx_npoll_off = offset;

    /*
     * NPOLLING TX CPU offset
     */
    if (sc->tx_ring_cnt == ncpus2) {
        offset = 0;
    } else {
        offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
        offset = device_getenv_int(dev, "npoll.txoff", offset_def);
        if (offset >= ncpus2 ||
            offset % sc->tx_ring_cnt != 0) {
            device_printf(dev, "invalid npoll.txoff %d, use %d\n",
                offset, offset_def);
            offset = offset_def;
        }
    }
    sc->tx_npoll_off = offset;
#endif

    /* Allocate interrupt */
    error = ix_alloc_intr(sc);
    if (error)
        goto failed;

    /* Setup serializes */
    ix_setup_serialize(sc);

    /* Allocate multicast array memory. */
    sc->mta = kmalloc(IXGBE_ETH_LENGTH_OF_ADDRESS * IX_MAX_MCASTADDR,
        M_DEVBUF, M_WAITOK);

    /* Initialize the shared code */
    hw->allow_unsupported_sfp = ix_unsupported_sfp;
    error = ixgbe_init_shared_code(hw);
    if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
        /*
         * No optics in this port; ask timer routine
         * to probe for later insertion.
         */
        sc->sfp_probe = TRUE;
        error = 0;
    } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
        device_printf(dev, "Unsupported SFP+ module detected!\n");
        error = EIO;
        goto failed;
    } else if (error) {
        device_printf(dev, "Unable to initialize the shared code\n");
        error = EIO;
        goto failed;
    }

    /* Make sure we have a good EEPROM before we read from it */
    if (ixgbe_validate_eeprom_checksum(&sc->hw, &csum) < 0) {
        device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
        error = EIO;
        goto failed;
    }

    error = ixgbe_init_hw(hw);
    if (error == IXGBE_ERR_EEPROM_VERSION) {
        device_printf(dev, "Pre-production device detected\n");
    } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
        device_printf(dev, "Unsupported SFP+ Module\n");
        error = EIO;
        goto failed;
    } else if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
        device_printf(dev, "No SFP+ Module found\n");
    }

    /* Detect and set physical type */
    ix_setup_optics(sc);

    /* Setup OS specific network interface */
    ix_setup_ifp(sc);

    /* Add sysctl tree */
    ix_add_sysctl(sc);

    error = ix_setup_intr(sc);
    if (error) {
        ether_ifdetach(&sc->arpcom.ac_if);
        goto failed;
    }

    /* Initialize statistics */
    ix_update_stats(sc);

    /*
     * Check PCIE slot type/speed/width
     */
    ix_slot_info(sc);

    /* Set an initial default flow control value */
    sc->fc = ixgbe_fc_full;

    /* Let hardware know driver is loaded */
    ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

    return 0;
failed:
    ix_detach(dev);
    return error;
}

static int
ix_detach(device_t dev)
{
    struct ix_softc *sc = device_get_softc(dev);

    if (device_is_attached(dev)) {
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t ctrl_ext;

        ifnet_serialize_all(ifp);

        ix_stop(sc);
        ix_teardown_intr(sc, sc->intr_cnt);

        ifnet_deserialize_all(ifp);

        callout_terminate(&sc->timer);
        ether_ifdetach(ifp);

        /* Let hardware know driver is unloading */
        ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
        ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
    }

    ifmedia_removeall(&sc->media);
    bus_generic_detach(dev);

    ix_free_intr(sc);

    if (sc->msix_mem_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
            sc->msix_mem_res);
    }
    if (sc->mem_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
            sc->mem_res);
    }

    ix_free_rings(sc);

    if (sc->mta != NULL)
        kfree(sc->mta, M_DEVBUF);
    if (sc->serializes != NULL)
        kfree(sc->serializes, M_DEVBUF);

    return 0;
}

static int
ix_shutdown(device_t dev)
{
    struct ix_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;

    ifnet_serialize_all(ifp);
    ix_stop(sc);
    ifnet_deserialize_all(ifp);

    return 0;
}
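
/*
 * Note on the transmit path below: ix_start() batches doorbell writes.
 * Descriptors are queued to the ring, but the TDT (tail) register is
 * only written once at least tx_wreg_nsegs segments have accumulated,
 * or when the subqueue drains; this trades a little latency for far
 * fewer MMIO writes on the hot path.
 */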

static void
ix_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
    struct ix_softc *sc = ifp->if_softc;
    struct ix_tx_ring *txr = ifsq_get_priv(ifsq);
    int idx = -1;
    uint16_t nsegs = 0;

    KKASSERT(txr->tx_ifsq == ifsq);
    ASSERT_SERIALIZED(&txr->tx_serialize);

    if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
        return;

    if (!sc->link_active || (txr->tx_flags & IX_TXFLAG_ENABLED) == 0) {
        ifsq_purge(ifsq);
        return;
    }

    while (!ifsq_is_empty(ifsq)) {
        struct mbuf *m_head;

        if (txr->tx_avail <= IX_MAX_SCATTER + IX_TX_RESERVED) {
            ifsq_set_oactive(ifsq);
            txr->tx_watchdog.wd_timer = 5;
            break;
        }

        m_head = ifsq_dequeue(ifsq);
        if (m_head == NULL)
            break;

        if (ix_encap(txr, &m_head, &nsegs, &idx)) {
            IFNET_STAT_INC(ifp, oerrors, 1);
            continue;
        }

        /*
         * TX interrupts are aggressively aggregated, so increasing
         * opackets at TX interrupt time would make the opackets
         * statistics vastly inaccurate; we do the opackets increment
         * now.
         */
        IFNET_STAT_INC(ifp, opackets, 1);

        if (nsegs >= txr->tx_wreg_nsegs) {
            IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->tx_idx), idx);
            nsegs = 0;
            idx = -1;
        }

        ETHER_BPF_MTAP(ifp, m_head);
    }
    if (idx >= 0)
        IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->tx_idx), idx);
}

static int
ix_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
    struct ix_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
    int error = 0, mask, reinit;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    switch (command) {
    case SIOCSIFMTU:
        if (ifr->ifr_mtu > IX_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
            error = EINVAL;
        } else {
            ifp->if_mtu = ifr->ifr_mtu;
            sc->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            ix_init(sc);
        }
        break;

    case SIOCSIFFLAGS:
        if (ifp->if_flags & IFF_UP) {
            if (ifp->if_flags & IFF_RUNNING) {
                if ((ifp->if_flags ^ sc->if_flags) &
                    (IFF_PROMISC | IFF_ALLMULTI))
                    ix_set_promisc(sc);
            } else {
                ix_init(sc);
            }
        } else if (ifp->if_flags & IFF_RUNNING) {
            ix_stop(sc);
        }
        sc->if_flags = ifp->if_flags;
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if (ifp->if_flags & IFF_RUNNING) {
            ix_disable_intr(sc);
            ix_set_multi(sc);
#ifdef IFPOLL_ENABLE
            if ((ifp->if_flags & IFF_NPOLLING) == 0)
#endif
                ix_enable_intr(sc);
        }
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
        break;

    case SIOCSIFCAP:
        reinit = 0;
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if (mask & IFCAP_RXCSUM) {
            ifp->if_capenable ^= IFCAP_RXCSUM;
            reinit = 1;
        }
        if (mask & IFCAP_VLAN_HWTAGGING) {
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
            reinit = 1;
        }
        if (mask & IFCAP_TXCSUM) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= CSUM_OFFLOAD;
            else
                ifp->if_hwassist &= ~CSUM_OFFLOAD;
        }
        if (mask & IFCAP_TSO) {
            ifp->if_capenable ^= IFCAP_TSO;
            if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
            else
                ifp->if_hwassist &= ~CSUM_TSO;
        }
        if (mask & IFCAP_RSS)
            ifp->if_capenable ^= IFCAP_RSS;
        if (reinit && (ifp->if_flags & IFF_RUNNING))
            ix_init(sc);
        break;

#if 0
    case SIOCGI2C:
    {
        struct ixgbe_hw *hw = &sc->hw;
        struct ixgbe_i2c_req i2c;

        error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
        if (error)
            break;
        if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
            error = EINVAL;
            break;
        }
        hw->phy.ops.read_i2c_byte(hw, i2c.offset,
            i2c.dev_addr, i2c.data);
        error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
        break;
    }
#endif

    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }
    return error;
}

#define IXGBE_MHADD_MFS_SHIFT   16
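
/*
 * MHADD.MFS (maximum frame size) lives in the upper 16 bits of the
 * MHADD register, hence the shift above; ix_init() below programs it
 * with sc->max_frame_size whenever the MTU exceeds the default ETHERMTU.
 */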

static void
ix_init(void *xsc)
{
    struct ix_softc *sc = xsc;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct ixgbe_hw *hw = &sc->hw;
    uint32_t rxpb, frame, size, tmp;
    uint32_t gpie, rxctrl;
    int i, error;
    boolean_t polling;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    ix_stop(sc);

    polling = FALSE;
#ifdef IFPOLL_ENABLE
    if (ifp->if_flags & IFF_NPOLLING)
        polling = TRUE;
#endif

    /* Configure # of used RX/TX rings */
    ix_set_ring_inuse(sc, polling);
    ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

    /* Get the latest mac address; user can use a LAA */
    bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    hw->addr_ctrl.rar_used_count = 1;

    /* Prepare transmit descriptors and buffers */
    for (i = 0; i < sc->tx_ring_inuse; ++i)
        ix_init_tx_ring(&sc->tx_rings[i]);

    ixgbe_init_hw(hw);
    ix_init_tx_unit(sc);

    /* Setup Multicast table */
    ix_set_multi(sc);

    /* Prepare receive descriptors and buffers */
    for (i = 0; i < sc->rx_ring_inuse; ++i) {
        error = ix_init_rx_ring(&sc->rx_rings[i]);
        if (error) {
            if_printf(ifp, "Could not initialize RX ring%d\n", i);
            ix_stop(sc);
            return;
        }
    }

    /* Configure RX settings */
    ix_init_rx_unit(sc);

    gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

    /* Enable Fan Failure Interrupt */
    gpie |= IXGBE_SDP1_GPIEN;

    /* Add for Module detection */
    if (hw->mac.type == ixgbe_mac_82599EB)
        gpie |= IXGBE_SDP2_GPIEN;

    /* Thermal Failure Detection */
    if (hw->mac.type == ixgbe_mac_X540)
        gpie |= IXGBE_SDP0_GPIEN;

    if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
        /* Enable Enhanced MSIX mode */
        gpie |= IXGBE_GPIE_MSIX_MODE;
        gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
            IXGBE_GPIE_OCD;
    }
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /* Set MTU size */
    if (ifp->if_mtu > ETHERMTU) {
        uint32_t mhadd;

        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    }

    /*
     * Enable TX rings
     */
    for (i = 0; i < sc->tx_ring_inuse; ++i) {
        uint32_t txdctl;

        txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
        txdctl |= IXGBE_TXDCTL_ENABLE;

        /*
         * Set WTHRESH to 0, since TX head write-back is used
         */
        txdctl &= ~(0x7f << 16);

        /*
         * When the internal queue falls below PTHRESH (32),
         * start prefetching as long as there are at least
         * HTHRESH (1) buffers ready.  The values are taken
         * from the Intel linux driver 3.8.21.
         * Prefetching enables tx line rate even with 1 queue.
         */
        txdctl |= (32 << 0) | (1 << 8);
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
    }
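
    /*
     * Note for the loop below: RXDCTL.ENABLE is not guaranteed to
     * latch immediately, so each ring is polled for up to ~10ms
     * (10 x 1ms) before the initial RDT write hands its descriptors
     * to the hardware.
     */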
    /*
     * Enable RX rings
     */
    for (i = 0; i < sc->rx_ring_inuse; ++i) {
        uint32_t rxdctl;
        int k;

        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
        if (hw->mac.type == ixgbe_mac_82598EB) {
            /*
             * PTHRESH = 21
             * HTHRESH = 4
             * WTHRESH = 8
             */
            rxdctl &= ~0x3FFFFF;
            rxdctl |= 0x080420;
        }
        rxdctl |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
        for (k = 0; k < 10; ++k) {
            if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
                IXGBE_RXDCTL_ENABLE)
                break;
            else
                msec_delay(1);
        }
        wmb();
        IXGBE_WRITE_REG(hw, IXGBE_RDT(i),
            sc->rx_rings[0].rx_ndesc - 1);
    }

    /* Set up VLAN support and filter */
    ix_set_vlan(sc);

    /* Enable Receive engine */
    rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
    if (hw->mac.type == ixgbe_mac_82598EB)
        rxctrl |= IXGBE_RXCTRL_DMBYPS;
    rxctrl |= IXGBE_RXCTRL_RXEN;
    ixgbe_enable_rx_dma(hw, rxctrl);

    for (i = 0; i < sc->tx_ring_inuse; ++i) {
        const struct ix_tx_ring *txr = &sc->tx_rings[i];

        if (txr->tx_intr_vec >= 0) {
            ix_set_ivar(sc, i, txr->tx_intr_vec, 1);
        } else {
            /*
             * An unconfigured TX interrupt vector can only
             * happen for MSI-X.
             */
            KASSERT(sc->intr_type == PCI_INTR_TYPE_MSIX,
                ("TX intr vector is not set"));
            KASSERT(i < sc->rx_ring_inuse,
                ("invalid TX ring %d, no piggyback RX ring", i));
            KASSERT(sc->rx_rings[i].rx_txr == txr,
                ("RX ring %d piggybacked TX ring mismatch", i));
            if (bootverbose)
                if_printf(ifp, "IVAR skips TX ring %d\n", i);
        }
    }
    for (i = 0; i < sc->rx_ring_inuse; ++i) {
        const struct ix_rx_ring *rxr = &sc->rx_rings[i];

        KKASSERT(rxr->rx_intr_vec >= 0);
        ix_set_ivar(sc, i, rxr->rx_intr_vec, 0);
        if (rxr->rx_txr != NULL) {
            /*
             * Piggyback the TX ring interrupt onto the RX
             * ring interrupt vector.
             */
            KASSERT(rxr->rx_txr->tx_intr_vec < 0,
                ("piggybacked TX ring configured intr vector"));
            KASSERT(rxr->rx_txr->tx_idx == i,
                ("RX ring %d piggybacked TX ring %u",
                 i, rxr->rx_txr->tx_idx));
            ix_set_ivar(sc, i, rxr->rx_intr_vec, 1);
            if (bootverbose) {
                if_printf(ifp, "IVAR RX ring %d piggybacks "
                    "TX ring %u\n", i, rxr->rx_txr->tx_idx);
            }
        }
    }
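
    /*
     * The IVAR writes above map each RX/TX queue to its interrupt
     * vector; the block below reserves MSI-X table entry 1 for
     * link/status events and programs EIAM so that ring vectors are
     * auto-masked when they fire (the handlers re-arm them with EIMS
     * writes).  This follows the usual ixgbe auto-mask scheme.
     */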
    if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
        /* Set up status MSI-X vector; it uses fixed entry 1 */
        ix_set_ivar(sc, 1, sc->sts_msix_vec, -1);

        /* Set up auto-mask for TX and RX rings */
        if (hw->mac.type == ixgbe_mac_82598EB) {
            IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EIMS_RTX_QUEUE);
        } else {
            IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
            IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
        }
    } else {
        IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EIMS_RTX_QUEUE);
    }
    for (i = 0; i < sc->intr_cnt; ++i)
        ix_set_eitr(sc, i, sc->intr_data[i].intr_rate);

    /*
     * Check on any SFP devices that need to be kick-started
     */
    if (hw->phy.type == ixgbe_phy_none) {
        error = hw->phy.ops.identify(hw);
        if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
            if_printf(ifp,
                "Unsupported SFP+ module type was detected.\n");
            /* XXX stop */
            return;
        }
    }

    /* Config/Enable Link */
    ix_config_link(sc);

    /*
     * Hardware Packet Buffer & Flow Control setup
     */
    frame = sc->max_frame_size;

    /* Calculate High Water */
    if (hw->mac.type == ixgbe_mac_X540)
        tmp = IXGBE_DV_X540(frame, frame);
    else
        tmp = IXGBE_DV(frame, frame);
    size = IXGBE_BT2KB(tmp);
    rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
    hw->fc.high_water[0] = rxpb - size;

    /* Now calculate Low Water */
    if (hw->mac.type == ixgbe_mac_X540)
        tmp = IXGBE_LOW_DV_X540(frame);
    else
        tmp = IXGBE_LOW_DV(frame);
    hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

    hw->fc.requested_mode = sc->fc;
    hw->fc.pause_time = IX_FC_PAUSE;
    hw->fc.send_xon = TRUE;

    /* Initialize the FC settings */
    ixgbe_start_hw(hw);

    /*
     * Only enable interrupts if we are not polling; make sure
     * they are off otherwise.
     */
    if (polling)
        ix_disable_intr(sc);
    else
        ix_enable_intr(sc);

    ifp->if_flags |= IFF_RUNNING;
    for (i = 0; i < sc->tx_ring_inuse; ++i) {
        ifsq_clr_oactive(sc->tx_rings[i].tx_ifsq);
        ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
    }

    ix_set_timer_cpuid(sc, polling);
    callout_reset_bycpu(&sc->timer, hz, ix_timer, sc, sc->timer_cpuid);
}
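
/*
 * Legacy/MSI interrupt handler.  Reading EICR both returns and clears
 * the pending cause bits, so the handler below dispatches RX ring 0/1
 * and TX ring 0 work from a single read and then re-enables the
 * sources via the EIMS write at the end.
 */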
static void
ix_intr(void *xsc)
{
    struct ix_softc *sc = xsc;
    struct ixgbe_hw *hw = &sc->hw;
    uint32_t eicr;

    ASSERT_SERIALIZED(&sc->main_serialize);

    eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
    if (eicr == 0) {
        IXGBE_WRITE_REG(hw, IXGBE_EIMS, sc->intr_mask);
        return;
    }

    if (eicr & IX_RX0_INTR_MASK) {
        struct ix_rx_ring *rxr = &sc->rx_rings[0];

        lwkt_serialize_enter(&rxr->rx_serialize);
        ix_rxeof(rxr, -1);
        lwkt_serialize_exit(&rxr->rx_serialize);
    }
    if (eicr & IX_RX1_INTR_MASK) {
        struct ix_rx_ring *rxr;

        KKASSERT(sc->rx_ring_inuse == IX_MIN_RXRING_RSS);
        rxr = &sc->rx_rings[1];

        lwkt_serialize_enter(&rxr->rx_serialize);
        ix_rxeof(rxr, -1);
        lwkt_serialize_exit(&rxr->rx_serialize);
    }

    if (eicr & IX_TX_INTR_MASK) {
        struct ix_tx_ring *txr = &sc->tx_rings[0];

        lwkt_serialize_enter(&txr->tx_serialize);
        ix_txeof(txr, *(txr->tx_hdr));
        if (!ifsq_is_empty(txr->tx_ifsq))
            ifsq_devstart(txr->tx_ifsq);
        lwkt_serialize_exit(&txr->tx_serialize);
    }

    if (__predict_false(eicr & IX_EICR_STATUS))
        ix_intr_status(sc, eicr);

    IXGBE_WRITE_REG(hw, IXGBE_EIMS, sc->intr_mask);
}

static void
ix_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct ix_softc *sc = ifp->if_softc;

    ix_update_link_status(sc);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_active)
        return;

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (sc->link_speed) {
    case IXGBE_LINK_SPEED_100_FULL:
        ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= sc->optics | IFM_FDX;
        break;
    }
}

static int
ix_media_change(struct ifnet *ifp)
{
    struct ix_softc *sc = ifp->if_softc;
    struct ifmedia *ifm = &sc->media;

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return EINVAL;

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        sc->hw.phy.autoneg_advertised =
            IXGBE_LINK_SPEED_100_FULL |
            IXGBE_LINK_SPEED_1GB_FULL |
            IXGBE_LINK_SPEED_10GB_FULL;
        break;
    default:
        if_printf(ifp, "Only auto media type\n");
        return EINVAL;
    }
    return 0;
}
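
/*
 * TSO context setup reads the Ethernet/IP/TCP header lengths from the
 * mbuf packet header, so all headers must reside contiguously in the
 * first mbuf; the helper below pulls them up front if they do not.
 */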
static __inline int
ix_tso_pullup(struct mbuf **mp)
{
    int hoff, iphlen, thoff;
    struct mbuf *m;

    m = *mp;
    KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

    iphlen = m->m_pkthdr.csum_iphlen;
    thoff = m->m_pkthdr.csum_thlen;
    hoff = m->m_pkthdr.csum_lhlen;

    KASSERT(iphlen > 0, ("invalid ip hlen"));
    KASSERT(thoff > 0, ("invalid tcp hlen"));
    KASSERT(hoff > 0, ("invalid ether hlen"));

    if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
        m = m_pullup(m, hoff + iphlen + thoff);
        if (m == NULL) {
            *mp = NULL;
            return ENOBUFS;
        }
        *mp = m;
    }
    return 0;
}
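
/*
 * Map and queue a single frame.  IX_TX_RESERVED descriptors are kept
 * back so that a context descriptor (checksum/TSO/VLAN) plus the data
 * segments always fit; the caller batches the final TDT write through
 * *idx (see ix_start()).
 */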
static int
ix_encap(struct ix_tx_ring *txr, struct mbuf **m_headp,
    uint16_t *segs_used, int *idx)
{
    uint32_t olinfo_status = 0, cmd_type_len, cmd_rs = 0;
    int i, j, error, nsegs, first, maxsegs;
    struct mbuf *m_head = *m_headp;
    bus_dma_segment_t segs[IX_MAX_SCATTER];
    bus_dmamap_t map;
    struct ix_tx_buf *txbuf;
    union ixgbe_adv_tx_desc *txd = NULL;

    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        error = ix_tso_pullup(m_headp);
        if (__predict_false(error))
            return error;
        m_head = *m_headp;
    }

    /* Basic descriptor defines */
    cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
        IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

    if (m_head->m_flags & M_VLANTAG)
        cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

    /*
     * Important to capture the first descriptor
     * used because it will contain the index of
     * the one we tell the hardware to report back
     */
    first = txr->tx_next_avail;
    txbuf = &txr->tx_buf[first];
    map = txbuf->map;

    /*
     * Map the packet for DMA.
     */
    maxsegs = txr->tx_avail - IX_TX_RESERVED;
    if (maxsegs > IX_MAX_SCATTER)
        maxsegs = IX_MAX_SCATTER;

    error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
        segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(error)) {
        m_freem(*m_headp);
        *m_headp = NULL;
        return error;
    }
    bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);

    m_head = *m_headp;

    /*
     * Set up the appropriate offload context if requested;
     * this may consume one TX descriptor.
     */
    if (ix_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status)) {
        (*segs_used)++;
        txr->tx_nsegs++;
    }

    *segs_used += nsegs;
    txr->tx_nsegs += nsegs;
    if (txr->tx_nsegs >= txr->tx_intr_nsegs) {
        /*
         * Report Status (RS) is turned on every intr_nsegs
         * descriptors (roughly).
         */
        txr->tx_nsegs = 0;
        cmd_rs = IXGBE_TXD_CMD_RS;
    }

    i = txr->tx_next_avail;
    for (j = 0; j < nsegs; j++) {
        bus_size_t seglen;
        bus_addr_t segaddr;

        txbuf = &txr->tx_buf[i];
        txd = &txr->tx_base[i];
        seglen = segs[j].ds_len;
        segaddr = htole64(segs[j].ds_addr);

        txd->read.buffer_addr = segaddr;
        txd->read.cmd_type_len = htole32(IXGBE_TXD_CMD_IFCS |
            cmd_type_len | seglen);
        txd->read.olinfo_status = htole32(olinfo_status);

        if (++i == txr->tx_ndesc)
            i = 0;
    }
    txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | cmd_rs);

    txr->tx_avail -= nsegs;
    txr->tx_next_avail = i;

    txbuf->m_head = m_head;
    txr->tx_buf[first].map = txbuf->map;
    txbuf->map = map;

    /*
     * Defer TDT updating until enough descriptors are set up
     */
    *idx = i;

    return 0;
}

static void
ix_set_promisc(struct ix_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    uint32_t reg_rctl;
    int mcnt = 0;

    reg_rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
    reg_rctl &= ~IXGBE_FCTRL_UPE;
    if (ifp->if_flags & IFF_ALLMULTI) {
        mcnt = IX_MAX_MCASTADDR;
    } else {
        struct ifmultiaddr *ifma;

        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
            if (ifma->ifma_addr->sa_family != AF_LINK)
                continue;
            if (mcnt == IX_MAX_MCASTADDR)
                break;
            mcnt++;
        }
    }
    if (mcnt < IX_MAX_MCASTADDR)
        reg_rctl &= ~IXGBE_FCTRL_MPE;
    IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);

    if (ifp->if_flags & IFF_PROMISC) {
        reg_rctl |= IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
    } else if (ifp->if_flags & IFF_ALLMULTI) {
        reg_rctl |= IXGBE_FCTRL_MPE;
        reg_rctl &= ~IXGBE_FCTRL_UPE;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
    }
}

static void
ix_set_multi(struct ix_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct ifmultiaddr *ifma;
    uint32_t fctrl;
    uint8_t *mta;
    int mcnt = 0;

    mta = sc->mta;
    bzero(mta, IXGBE_ETH_LENGTH_OF_ADDRESS * IX_MAX_MCASTADDR);

    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        if (mcnt == IX_MAX_MCASTADDR)
            break;
        bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
            &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
            IXGBE_ETH_LENGTH_OF_ADDRESS);
        mcnt++;
    }

    fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
    fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
    if (ifp->if_flags & IFF_PROMISC) {
        fctrl |= IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE;
    } else if (mcnt >= IX_MAX_MCASTADDR || (ifp->if_flags & IFF_ALLMULTI)) {
        fctrl |= IXGBE_FCTRL_MPE;
        fctrl &= ~IXGBE_FCTRL_UPE;
    } else {
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
    }
    IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);

    if (mcnt < IX_MAX_MCASTADDR) {
        ixgbe_update_mc_addr_list(&sc->hw,
            mta, mcnt, ix_mc_array_itr, TRUE);
    }
}

/*
 * This is an iterator function needed by the multicast shared code.
 * It simply feeds the shared code routine the addresses in the array
 * built by ix_set_multi(), one by one.
 */
static uint8_t *
ix_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
{
    uint8_t *addr = *update_ptr;
    uint8_t *newptr;

    *vmdq = 0;

    newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
    *update_ptr = newptr;
    return addr;
}

static void
ix_timer(void *arg)
{
    struct ix_softc *sc = arg;

    lwkt_serialize_enter(&sc->main_serialize);

    if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0) {
        lwkt_serialize_exit(&sc->main_serialize);
        return;
    }

    /* Check for pluggable optics */
    if (sc->sfp_probe) {
        if (!ix_sfp_probe(sc))
            goto done; /* Nothing to do */
    }

    ix_update_link_status(sc);
    ix_update_stats(sc);

done:
    callout_reset_bycpu(&sc->timer, hz, ix_timer, sc, sc->timer_cpuid);
    lwkt_serialize_exit(&sc->main_serialize);
}

static void
ix_update_link_status(struct ix_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;

    if (sc->link_up) {
        if (sc->link_active == FALSE) {
            if (bootverbose) {
                if_printf(ifp, "Link is up %d Gbps %s\n",
                    sc->link_speed == 128 ? 10 : 1,
                    "Full Duplex");
            }
            sc->link_active = TRUE;

            /* Update any Flow Control changes */
            ixgbe_fc_enable(&sc->hw);

            ifp->if_link_state = LINK_STATE_UP;
            if_link_state_change(ifp);
        }
    } else { /* Link down */
        if (sc->link_active == TRUE) {
            if (bootverbose)
                if_printf(ifp, "Link is Down\n");
            ifp->if_link_state = LINK_STATE_DOWN;
            if_link_state_change(ifp);

            sc->link_active = FALSE;
        }
    }
}

static void
ix_stop(struct ix_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    int i;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    ix_disable_intr(sc);
    callout_stop(&sc->timer);

    ifp->if_flags &= ~IFF_RUNNING;
    for (i = 0; i < sc->tx_ring_cnt; ++i) {
        struct ix_tx_ring *txr = &sc->tx_rings[i];

        ifsq_clr_oactive(txr->tx_ifsq);
        ifsq_watchdog_stop(&txr->tx_watchdog);
        txr->tx_flags &= ~IX_TXFLAG_ENABLED;
    }

    ixgbe_reset_hw(hw);
    hw->adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    if (hw->mac.type == ixgbe_mac_82599EB)
        ixgbe_stop_mac_link_on_d3_82599(hw);
    /* Turn off the laser - noop with no optics */
    ixgbe_disable_tx_laser(hw);

    /* Update the stack */
    sc->link_up = FALSE;
    ix_update_link_status(sc);

    /* Reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    for (i = 0; i < sc->tx_ring_cnt; ++i)
        ix_free_tx_ring(&sc->tx_rings[i]);

    for (i = 0; i < sc->rx_ring_cnt; ++i)
        ix_free_rx_ring(&sc->rx_rings[i]);
}

static void
ix_setup_optics(struct ix_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    int layer;

    layer = ixgbe_get_supported_physical_layer(hw);

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
        sc->optics = IFM_10G_T;
        return;
    }

    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
        sc->optics = IFM_1000_T;
        return;
    }

    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
        sc->optics = IFM_1000_SX;
        return;
    }

    if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
        IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
        sc->optics = IFM_10G_LR;
        return;
    }

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
        sc->optics = IFM_10G_SR;
        return;
    }

    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
        sc->optics = IFM_10G_TWINAX;
        return;
    }

    if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
        IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
        sc->optics = IFM_10G_CX4;
        return;
    }

    /* If we get here just set the default */
    sc->optics = IFM_ETHER | IFM_AUTO;
}

static void
ix_setup_ifp(struct ix_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    int i;

    ifp->if_baudrate = IF_Gbps(10UL);

    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_init = ix_init;
    ifp->if_ioctl = ix_ioctl;
    ifp->if_start = ix_start;
    ifp->if_serialize = ix_serialize;
    ifp->if_deserialize = ix_deserialize;
    ifp->if_tryserialize = ix_tryserialize;
#ifdef INVARIANTS
    ifp->if_serialize_assert = ix_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
    ifp->if_npoll = ix_npoll;
#endif

    /* Increase TSO burst length */
    ifp->if_tsolen = (8 * ETHERMTU);

    ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_rings[0].rx_ndesc;
    ifp->if_nmbjclusters = ifp->if_nmbclusters;

    ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].tx_ndesc - 2);
    ifq_set_ready(&ifp->if_snd);
    ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

    ifp->if_mapsubq = ifq_mapsubq_mask;
    ifq_set_subq_mask(&ifp->if_snd, 0);

    ether_ifattach(ifp, hw->mac.addr, NULL);

    ifp->if_capabilities =
        IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
    if (IX_ENABLE_HWRSS(sc))
        ifp->if_capabilities |= IFCAP_RSS;
    ifp->if_capenable = ifp->if_capabilities;
    ifp->if_hwassist = CSUM_OFFLOAD | CSUM_TSO;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

    /* Setup TX rings and subqueues */
    for (i = 0; i < sc->tx_ring_cnt; ++i) {
        struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
        struct ix_tx_ring *txr = &sc->tx_rings[i];

        ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
        ifsq_set_priv(ifsq, txr);
        ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
        txr->tx_ifsq = ifsq;

        ifsq_watchdog_init(&txr->tx_watchdog, ifsq, ix_watchdog);
    }

    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_add(&sc->media, IFM_ETHER | sc->optics, 0, NULL);
    ifmedia_set(&sc->media, IFM_ETHER | sc->optics);
    if (hw->device_id == IXGBE_DEV_ID_82598AT) {
        ifmedia_add(&sc->media,
            IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
        ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    }
    ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

static boolean_t
ix_is_sfp(const struct ixgbe_hw *hw)
{
    switch (hw->phy.type) {
    case ixgbe_phy_sfp_avago:
    case ixgbe_phy_sfp_ftl:
    case ixgbe_phy_sfp_intel:
    case ixgbe_phy_sfp_unknown:
    case ixgbe_phy_sfp_passive_tyco:
    case ixgbe_phy_sfp_passive_unknown:
        return TRUE;
    default:
        return FALSE;
    }
}

static void
ix_config_link(struct ix_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    boolean_t sfp;

    sfp = ix_is_sfp(hw);
    if (sfp) {
        if (hw->phy.multispeed_fiber) {
            hw->mac.ops.setup_sfp(hw);
            ixgbe_enable_tx_laser(hw);
            ix_handle_msf(sc);
        } else {
            ix_handle_mod(sc);
        }
    } else {
        uint32_t autoneg, err = 0;

        if (hw->mac.ops.check_link != NULL) {
            err = ixgbe_check_link(hw, &sc->link_speed,
                &sc->link_up, FALSE);
            if (err)
                return;
        }

        autoneg = hw->phy.autoneg_advertised;
        if (!autoneg && hw->mac.ops.get_link_capabilities != NULL) {
            bool negotiate;

            err = hw->mac.ops.get_link_capabilities(hw,
                &autoneg, &negotiate);
            if (err)
                return;
        }

        if (hw->mac.ops.setup_link != NULL) {
            err = hw->mac.ops.setup_link(hw,
                autoneg, sc->link_up);
            if (err)
                return;
        }
    }
}
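
/*
 * DMA resources are organized as a two-level hierarchy: ix_alloc_rings()
 * creates one parent busdma tag for the device, and each TX/RX ring then
 * derives its own descriptor-ring and buffer tags from it in
 * ix_create_tx_ring()/ix_create_rx_ring().
 */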
static int
ix_alloc_rings(struct ix_softc *sc)
{
    int error, i;

    /*
     * Create top level busdma tag
     */
    error = bus_dma_tag_create(NULL, 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
        &sc->parent_tag);
    if (error) {
        device_printf(sc->dev, "could not create top level DMA tag\n");
        return error;
    }

    /*
     * Allocate TX descriptor rings and buffers
     */
    sc->tx_rings = kmalloc_cachealign(
        sizeof(struct ix_tx_ring) * sc->tx_ring_cnt,
        M_DEVBUF, M_WAITOK | M_ZERO);
    for (i = 0; i < sc->tx_ring_cnt; ++i) {
        struct ix_tx_ring *txr = &sc->tx_rings[i];

        txr->tx_sc = sc;
        txr->tx_idx = i;
        txr->tx_intr_vec = -1;
        lwkt_serialize_init(&txr->tx_serialize);

        error = ix_create_tx_ring(txr);
        if (error)
            return error;
    }

    /*
     * Allocate RX descriptor rings and buffers
     */
    sc->rx_rings = kmalloc_cachealign(
        sizeof(struct ix_rx_ring) * sc->rx_ring_cnt,
        M_DEVBUF, M_WAITOK | M_ZERO);
    for (i = 0; i < sc->rx_ring_cnt; ++i) {
        struct ix_rx_ring *rxr = &sc->rx_rings[i];

        rxr->rx_sc = sc;
        rxr->rx_idx = i;
        rxr->rx_intr_vec = -1;
        lwkt_serialize_init(&rxr->rx_serialize);

        error = ix_create_rx_ring(rxr);
        if (error)
            return error;
    }

    return 0;
}

static int
ix_create_tx_ring(struct ix_tx_ring *txr)
{
    int error, i, tsize, ntxd;

    /*
     * Validate number of transmit descriptors.  It must not exceed
     * hardware maximum, and must be multiple of IX_DBA_ALIGN.
     */
    ntxd = device_getenv_int(txr->tx_sc->dev, "txd", ix_txd);
    if (((ntxd * sizeof(union ixgbe_adv_tx_desc)) % IX_DBA_ALIGN) != 0 ||
        ntxd < IX_MIN_TXD || ntxd > IX_MAX_TXD) {
        device_printf(txr->tx_sc->dev,
            "Using %d TX descriptors instead of %d!\n",
            IX_DEF_TXD, ntxd);
        txr->tx_ndesc = IX_DEF_TXD;
    } else {
        txr->tx_ndesc = ntxd;
    }

    /*
     * Allocate TX head write-back buffer
     */
    txr->tx_hdr = bus_dmamem_coherent_any(txr->tx_sc->parent_tag,
        __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
        &txr->tx_hdr_dtag, &txr->tx_hdr_map, &txr->tx_hdr_paddr);
    if (txr->tx_hdr == NULL) {
        device_printf(txr->tx_sc->dev,
            "Unable to allocate TX head write-back buffer\n");
        return ENOMEM;
    }

    /*
     * Allocate TX descriptor ring
     */
    tsize = roundup2(txr->tx_ndesc * sizeof(union ixgbe_adv_tx_desc),
        IX_DBA_ALIGN);
    txr->tx_base = bus_dmamem_coherent_any(txr->tx_sc->parent_tag,
        IX_DBA_ALIGN, tsize, BUS_DMA_WAITOK | BUS_DMA_ZERO,
        &txr->tx_base_dtag, &txr->tx_base_map, &txr->tx_base_paddr);
    if (txr->tx_base == NULL) {
        device_printf(txr->tx_sc->dev,
            "Unable to allocate TX Descriptor memory\n");
        return ENOMEM;
    }

    tsize = __VM_CACHELINE_ALIGN(sizeof(struct ix_tx_buf) * txr->tx_ndesc);
    txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

    /*
     * Create DMA tag for TX buffers
     */
    error = bus_dma_tag_create(txr->tx_sc->parent_tag,
        1, 0,               /* alignment, bounds */
        BUS_SPACE_MAXADDR,  /* lowaddr */
        BUS_SPACE_MAXADDR,  /* highaddr */
        NULL, NULL,         /* filter, filterarg */
        IX_TSO_SIZE,        /* maxsize */
        IX_MAX_SCATTER,     /* nsegments */
        PAGE_SIZE,          /* maxsegsize */
        BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
        BUS_DMA_ONEBPAGE,   /* flags */
        &txr->tx_tag);
    if (error) {
        device_printf(txr->tx_sc->dev,
            "Unable to allocate TX DMA tag\n");
        kfree(txr->tx_buf, M_DEVBUF);
        txr->tx_buf = NULL;
        return error;
    }

    /*
     * Create DMA maps for TX buffers
     */
    for (i = 0; i < txr->tx_ndesc; ++i) {
        struct ix_tx_buf *txbuf = &txr->tx_buf[i];

        error = bus_dmamap_create(txr->tx_tag,
            BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
        if (error) {
            device_printf(txr->tx_sc->dev,
                "Unable to create TX DMA map\n");
            ix_destroy_tx_ring(txr, i);
            return error;
        }
    }

    /*
     * Initialize various watermarks
     */
    txr->tx_wreg_nsegs = IX_DEF_TXWREG_NSEGS;
    txr->tx_intr_nsegs = txr->tx_ndesc / 16;

    return 0;
}

static void
ix_destroy_tx_ring(struct ix_tx_ring *txr, int ndesc)
{
    int i;

    if (txr->tx_hdr != NULL) {
        bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_map);
        bus_dmamem_free(txr->tx_hdr_dtag,
            __DEVOLATILE(void *, txr->tx_hdr), txr->tx_hdr_map);
        bus_dma_tag_destroy(txr->tx_hdr_dtag);
        txr->tx_hdr = NULL;
    }

    if (txr->tx_base != NULL) {
        bus_dmamap_unload(txr->tx_base_dtag, txr->tx_base_map);
        bus_dmamem_free(txr->tx_base_dtag, txr->tx_base,
            txr->tx_base_map);
        bus_dma_tag_destroy(txr->tx_base_dtag);
        txr->tx_base = NULL;
    }

    if (txr->tx_buf == NULL)
        return;

    for (i = 0; i < ndesc; ++i) {
        struct ix_tx_buf *txbuf = &txr->tx_buf[i];

        KKASSERT(txbuf->m_head == NULL);
        bus_dmamap_destroy(txr->tx_tag, txbuf->map);
    }
    bus_dma_tag_destroy(txr->tx_tag);

    kfree(txr->tx_buf, M_DEVBUF);
    txr->tx_buf = NULL;
}

static void
ix_init_tx_ring(struct ix_tx_ring *txr)
{
    /* Clear the old ring contents */
    bzero(txr->tx_base, sizeof(union ixgbe_adv_tx_desc) * txr->tx_ndesc);

    /* Clear TX head write-back buffer */
    *(txr->tx_hdr) = 0;

    /* Reset indices */
    txr->tx_next_avail = 0;
    txr->tx_next_clean = 0;
    txr->tx_nsegs = 0;

    /* Set number of descriptors available */
    txr->tx_avail = txr->tx_ndesc;

    /* Enable this TX ring */
    txr->tx_flags |= IX_TXFLAG_ENABLED;
}

static void
ix_init_tx_unit(struct ix_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    int i;

    /*
     * Setup the Base and Length of the Tx Descriptor Ring
     */
    for (i = 0; i < sc->tx_ring_inuse; ++i) {
        struct ix_tx_ring *txr = &sc->tx_rings[i];
        uint64_t tdba = txr->tx_base_paddr;
        uint64_t hdr_paddr = txr->tx_hdr_paddr;
        uint32_t txctrl;

        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (uint32_t)tdba);
        IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (uint32_t)(tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
            txr->tx_ndesc * sizeof(union ixgbe_adv_tx_desc));

        /* Setup the HW Tx Head and Tail descriptor pointers */
        IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);

        /* Disable TX head write-back relax ordering */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
            break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        default:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
            break;
        }
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
            break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        default:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
            break;
        }

        /* Enable TX head write-back */
        IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(i),
            (uint32_t)(hdr_paddr >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i),
            ((uint32_t)hdr_paddr) | IXGBE_TDWBAL_HEAD_WB_ENABLE);
    }

    if (hw->mac.type != ixgbe_mac_82598EB) {
        uint32_t dmatxctl, rttdcs;

        dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
        dmatxctl |= IXGBE_DMATXCTL_TE;
        IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);

        /* Disable arbiter to set MTQC */
        rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        rttdcs |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

        IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);

        /* Re-enable arbiter */
        rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    }
}
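
/*
 * Advanced TX descriptors carry offload parameters out-of-band: a
 * "context" descriptor holding header lengths and the VLAN tag is
 * written into the ring ahead of the data descriptors.  The setup
 * routines below return the number of descriptors they consumed
 * (0 or 1) so ix_encap() can account for them.
 */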
static int
ix_tx_ctx_setup(struct ix_tx_ring *txr, const struct mbuf *mp,
    uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
    struct ixgbe_adv_tx_context_desc *TXD;
    uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
    int ehdrlen, ip_hlen = 0, ctxd;
    boolean_t offload = TRUE;

    /* First check if TSO is to be used */
    if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
        return ix_tso_ctx_setup(txr, mp,
            cmd_type_len, olinfo_status);
    }

    if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
        offload = FALSE;

    /* Indicate the whole packet as payload when not doing TSO */
    *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

    /*
     * In advanced descriptors the vlan tag must be placed into the
     * context descriptor.  Hence we need to make one even if not
     * doing checksum offloads.
     */
    if (mp->m_flags & M_VLANTAG) {
        vlan_macip_lens |= htole16(mp->m_pkthdr.ether_vlantag) <<
            IXGBE_ADVTXD_VLAN_SHIFT;
    } else if (!offload) {
        /* No TX descriptor is consumed */
        return 0;
    }

    /* Set the ether header length */
    ehdrlen = mp->m_pkthdr.csum_lhlen;
    KASSERT(ehdrlen > 0, ("invalid ether hlen"));
    vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

    if (mp->m_pkthdr.csum_flags & CSUM_IP) {
        *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
        ip_hlen = mp->m_pkthdr.csum_iphlen;
        KASSERT(ip_hlen > 0, ("invalid ip hlen"));
    }
    vlan_macip_lens |= ip_hlen;

    type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
    if (mp->m_pkthdr.csum_flags & CSUM_TCP)
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
    else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;

    if (mp->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
        *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

    /* Now ready a context descriptor */
    ctxd = txr->tx_next_avail;
    TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

    /* Now copy bits into descriptor */
    TXD->vlan_macip_lens = htole32(vlan_macip_lens);
    TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
    TXD->seqnum_seed = htole32(0);
    TXD->mss_l4len_idx = htole32(0);

    /* We've consumed the first desc, adjust counters */
    if (++ctxd == txr->tx_ndesc)
        ctxd = 0;
    txr->tx_next_avail = ctxd;
    --txr->tx_avail;

    /* One TX descriptor is consumed */
    return 1;
}

static int
ix_tso_ctx_setup(struct ix_tx_ring *txr, const struct mbuf *mp,
    uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
    struct ixgbe_adv_tx_context_desc *TXD;
    uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
    uint32_t mss_l4len_idx = 0, paylen;
    int ctxd, ehdrlen, ip_hlen, tcp_hlen;

    ehdrlen = mp->m_pkthdr.csum_lhlen;
    KASSERT(ehdrlen > 0, ("invalid ether hlen"));

    ip_hlen = mp->m_pkthdr.csum_iphlen;
    KASSERT(ip_hlen > 0, ("invalid ip hlen"));

    tcp_hlen = mp->m_pkthdr.csum_thlen;
    KASSERT(tcp_hlen > 0, ("invalid tcp hlen"));

    ctxd = txr->tx_next_avail;
    TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

    if (mp->m_flags & M_VLANTAG) {
        vlan_macip_lens |= htole16(mp->m_pkthdr.ether_vlantag) <<
            IXGBE_ADVTXD_VLAN_SHIFT;
    }
    vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
    vlan_macip_lens |= ip_hlen;
    TXD->vlan_macip_lens = htole32(vlan_macip_lens);

    /* ADV DTYPE TUCMD */
    type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
    type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
    type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
    TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

    /* MSS L4LEN IDX */
    mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
    mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
    TXD->mss_l4len_idx = htole32(mss_l4len_idx);

    TXD->seqnum_seed = htole32(0);

    if (++ctxd == txr->tx_ndesc)
        ctxd = 0;

    txr->tx_avail--;
    txr->tx_next_avail = ctxd;

    *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

    /* This is used in the transmit desc in encap */
    paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;

    *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
    *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
    *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

    /* One TX descriptor is consumed */
    return 1;
}
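
/*
 * TX completion uses head write-back instead of descriptor write-back:
 * ix_init_tx_unit() points TDWBAL/TDWBAH at txr->tx_hdr, the hardware
 * DMAs the current head index there, and the callers pass *(txr->tx_hdr)
 * in as 'hdr' below.  Everything between tx_next_clean and that head
 * index has been transmitted and can be freed.
 */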

static int
ix_tso_ctx_setup(struct ix_tx_ring *txr, const struct mbuf *mp,
    uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	uint32_t mss_l4len_idx = 0, paylen;
	int ctxd, ehdrlen, ip_hlen, tcp_hlen;

	ehdrlen = mp->m_pkthdr.csum_lhlen;
	KASSERT(ehdrlen > 0, ("invalid ether hlen"));

	ip_hlen = mp->m_pkthdr.csum_iphlen;
	KASSERT(ip_hlen > 0, ("invalid ip hlen"));

	tcp_hlen = mp->m_pkthdr.csum_thlen;
	KASSERT(tcp_hlen > 0, ("invalid tcp hlen"));

	ctxd = txr->tx_next_avail;
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	if (mp->m_flags & M_VLANTAG) {
		vlan_macip_lens |= htole16(mp->m_pkthdr.ether_vlantag) <<
		    IXGBE_ADVTXD_VLAN_SHIFT;
	}
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);

	if (++ctxd == txr->tx_ndesc)
		ctxd = 0;

	txr->tx_avail--;
	txr->tx_next_avail = ctxd;

	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

	/* This is used in the transmit desc in encap */
	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;

	*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* One TX descriptor is consumed */
	return 1;
}

static void
ix_txeof(struct ix_tx_ring *txr, int hdr)
{
	int first, avail;

	if (txr->tx_avail == txr->tx_ndesc)
		return;

	first = txr->tx_next_clean;
	if (first == hdr)
		return;

	avail = txr->tx_avail;
	while (first != hdr) {
		struct ix_tx_buf *txbuf = &txr->tx_buf[first];

		++avail;
		if (txbuf->m_head) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		if (++first == txr->tx_ndesc)
			first = 0;
	}
	txr->tx_next_clean = first;
	txr->tx_avail = avail;

	if (txr->tx_avail > IX_MAX_SCATTER + IX_TX_RESERVED) {
		ifsq_clr_oactive(txr->tx_ifsq);
		txr->tx_watchdog.wd_timer = 0;
	}
}
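
/*
 * Example (illustrative): union ixgbe_adv_rx_desc is 16 bytes, so a
 * ring of IX_DEF_RXD descriptors occupies IX_DEF_RXD * 16 bytes; the
 * validation below rejects any "rxd" tunable whose resulting ring
 * size is not a multiple of IX_DBA_ALIGN, since the descriptor base
 * programmed into RDBAL/RDBAH must be so aligned.
 */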

static int
ix_create_rx_ring(struct ix_rx_ring *rxr)
{
	int i, rsize, error, nrxd;

	/*
	 * Validate number of receive descriptors.  It must not exceed
	 * the hardware maximum, and must be a multiple of IX_DBA_ALIGN.
	 */
	nrxd = device_getenv_int(rxr->rx_sc->dev, "rxd", ix_rxd);
	if (((nrxd * sizeof(union ixgbe_adv_rx_desc)) % IX_DBA_ALIGN) != 0 ||
	    nrxd < IX_MIN_RXD || nrxd > IX_MAX_RXD) {
		device_printf(rxr->rx_sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IX_DEF_RXD, nrxd);
		rxr->rx_ndesc = IX_DEF_RXD;
	} else {
		rxr->rx_ndesc = nrxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->rx_ndesc * sizeof(union ixgbe_adv_rx_desc),
	    IX_DBA_ALIGN);
	rxr->rx_base = bus_dmamem_coherent_any(rxr->rx_sc->parent_tag,
	    IX_DBA_ALIGN, rsize, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &rxr->rx_base_dtag, &rxr->rx_base_map, &rxr->rx_base_paddr);
	if (rxr->rx_base == NULL) {
		device_printf(rxr->rx_sc->dev,
		    "Unable to allocate RX Descriptor memory\n");
		return ENOMEM;
	}

	rsize = __VM_CACHELINE_ALIGN(sizeof(struct ix_rx_buf) * rxr->rx_ndesc);
	rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->rx_sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    PAGE_SIZE,		/* maxsize */
	    1,			/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,	/* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->rx_sc->dev,
		    "Unable to create RX DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->rx_sc->dev,
		    "Unable to create spare RX DMA map\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->rx_ndesc; ++i) {
		struct ix_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->rx_sc->dev,
			    "Unable to create RX dma map\n");
			ix_destroy_rx_ring(rxr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	rxr->rx_wreg_nsegs = IX_DEF_RXWREG_NSEGS;

	return 0;
}

static void
ix_destroy_rx_ring(struct ix_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rx_base != NULL) {
		bus_dmamap_unload(rxr->rx_base_dtag, rxr->rx_base_map);
		bus_dmamem_free(rxr->rx_base_dtag, rxr->rx_base,
		    rxr->rx_base_map);
		bus_dma_tag_destroy(rxr->rx_base_dtag);
		rxr->rx_base = NULL;
	}

	if (rxr->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct ix_rx_buf *rxbuf = &rxr->rx_buf[i];

		KKASSERT(rxbuf->m_head == NULL);
		bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
	}
	bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
	bus_dma_tag_destroy(rxr->rx_tag);

	kfree(rxr->rx_buf, M_DEVBUF);
	rxr->rx_buf = NULL;
}
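
/*
 * NOTE: ix_destroy_rx_ring() above must cope with a partially
 * constructed ring (ndesc < rx_ndesc); ix_create_rx_ring() relies
 * on that for error unwinding when creating the per-buffer DMA maps.
 */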

/*
** Used to detect a descriptor that has
** been merged by Hardware RSC.
*/
static __inline uint32_t
ix_rsc_count(union ixgbe_adv_rx_desc *rx)
{
	return (le32toh(rx->wb.lower.lo_dword.data) &
	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
}

#if 0
/*********************************************************************
 *
 *  Initialize Hardware RSC (LRO) feature on 82599
 *  for an RX ring, this is toggled by the LRO capability
 *  even though it is transparent to the stack.
 *
 *  NOTE: since this HW feature only works with IPV4 and
 *        our testing has shown soft LRO to be as effective
 *        I have decided to disable this by default.
 *
 **********************************************************************/
static void
ix_setup_hw_rsc(struct ix_rx_ring *rxr)
{
	struct ix_softc *sc = rxr->rx_sc;
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t rscctrl, rdrxctl;

#if 0
	/* If turning LRO/RSC off we need to disable it */
	if ((sc->arpcom.ac_if.if_capenable & IFCAP_LRO) == 0) {
		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
		return;
	}
#endif

	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
	rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);

	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	** Limit the total number of descriptors that
	** can be combined, so it does not exceed 64K
	*/
	if (rxr->mbuf_sz == MCLBYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
	else if (rxr->mbuf_sz == MJUMPAGESIZE)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
	else if (rxr->mbuf_sz == MJUM9BYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	else	/* Using 16K cluster */
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;

	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);

	/* Enable TCP header recognition */
	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
	     IXGBE_PSRTYPE_TCPHDR));

	/* Disable RSC for ACK packets */
	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));

	rxr->hw_rsc = TRUE;
}
#endif

static int
ix_init_rx_ring(struct ix_rx_ring *rxr)
{
	int i;

	/* Clear the ring contents */
	bzero(rxr->rx_base, rxr->rx_ndesc * sizeof(union ixgbe_adv_rx_desc));

	/* XXX we need JUMPAGESIZE for RSC too */
	if (rxr->rx_sc->max_frame_size <= MCLBYTES)
		rxr->rx_mbuf_sz = MCLBYTES;
	else
		rxr->rx_mbuf_sz = MJUMPAGESIZE;

	/* Now replenish the mbufs */
	for (i = 0; i < rxr->rx_ndesc; ++i) {
		int error;

		error = ix_newbuf(rxr, i, TRUE);
		if (error)
			return error;
	}

	/* Setup our descriptor indices */
	rxr->rx_next_check = 0;
	rxr->rx_flags &= ~IX_RXRING_FLAG_DISC;

#if 0
	/*
	** Now set up the LRO interface:
	*/
	if (ixgbe_rsc_enable)
		ix_setup_hw_rsc(rxr);
#endif

	return 0;
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT		2

#define BSIZEPKT_ROUNDUP	((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1)
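
/*
 * Worked example (assuming 2K clusters): SRRCTL.BSIZEPKT is in 1KB
 * units, so with rx_mbuf_sz == MCLBYTES the computation below yields
 * bufsz = (2048 + 1023) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT = 2, i.e. a
 * 2KB packet buffer per descriptor.
 */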

static void
ix_init_rx_unit(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t bufsz, rxctrl, fctrl, rxcsum, hlreg;
	int i;

	/*
	 * Make sure receives are disabled while setting up the descriptor ring
	 */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	KKASSERT(sc->rx_rings[0].rx_mbuf_sz >= MCLBYTES);
	bufsz = (sc->rx_rings[0].rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct ix_rx_ring *rxr = &sc->rx_rings[i];
		uint64_t rdba = rxr->rx_base_paddr;
		uint32_t srrctl;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (uint32_t)rdba);
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (uint32_t)(rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
		    rxr->rx_ndesc * sizeof(union ixgbe_adv_rx_desc));

		/*
		 * Set up the SRRCTL register
		 */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));

		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		if (sc->rx_ring_inuse > 1) {
			/* See the comment near ix_enable_rx_drop() */
			switch (sc->fc) {
			case ixgbe_fc_rx_pause:
			case ixgbe_fc_tx_pause:
			case ixgbe_fc_full:
				srrctl &= ~IXGBE_SRRCTL_DROP_EN;
				if (i == 0 && bootverbose) {
					if_printf(ifp, "flow control %d, "
					    "disable RX drop\n", sc->fc);
				}
				break;

			case ixgbe_fc_none:
				srrctl |= IXGBE_SRRCTL_DROP_EN;
				if (i == 0 && bootverbose) {
					if_printf(ifp, "flow control %d, "
					    "enable RX drop\n", sc->fc);
				}
				break;

			default:
				break;
			}
		}
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB)
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), 0);

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	/*
	 * Setup RSS
	 */
	if (IX_ENABLE_HWRSS(sc)) {
		uint8_t key[IX_NRSSRK * IX_RSSRK_SIZE];
		int j, r;

		/*
		 * NOTE:
		 * When we reach here, RSS has already been disabled
		 * in ix_stop(), so we can safely configure the RSS key
		 * and redirect table.
		 */

		/*
		 * Configure RSS key
		 */
		toeplitz_get_key(key, sizeof(key));
		for (i = 0; i < IX_NRSSRK; ++i) {
			uint32_t rssrk;

			rssrk = IX_RSSRK_VAL(key, i);
			IX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n",
			    i, rssrk);

			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rssrk);
		}

		/*
		 * Configure RSS redirect table in following fashion:
		 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
		 */
		r = 0;
		for (j = 0; j < IX_NRETA; ++j) {
			uint32_t reta = 0;

			for (i = 0; i < IX_RETA_SIZE; ++i) {
				uint32_t q;

				q = r % sc->rx_ring_inuse;
				reta |= q << (8 * i);
				++r;
			}
			IX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
			IXGBE_WRITE_REG(hw, IXGBE_RETA(j), reta);
		}
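
		/*
		 * Example (illustrative): with rx_ring_inuse == 2 the
		 * redirect table is filled 0,1,0,1,..., so each RETA
		 * register above packs four ring indices as 0x01000100.
		 */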

		/*
		 * Enable multiple receive queues.
		 * Enable IPv4 RSS standard hash functions.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
		    IXGBE_MRQC_RSSEN |
		    IXGBE_MRQC_RSS_FIELD_IPV4 |
		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP);

		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}

static __inline void
ix_rx_refresh(struct ix_rx_ring *rxr, int i)
{
	if (--i < 0)
		i = rxr->rx_ndesc - 1;
	IXGBE_WRITE_REG(&rxr->rx_sc->hw, IXGBE_RDT(rxr->rx_idx), i);
}

static __inline void
ix_rxcsum(uint32_t staterr, struct mbuf *mp, uint32_t ptype)
{
	if ((ptype &
	     (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_IPV4_EX)) == 0) {
		/* Not IPv4 */
		return;
	}

	if ((staterr & (IXGBE_RXD_STAT_IPCS | IXGBE_RXDADV_ERR_IPE)) ==
	    IXGBE_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((ptype &
	     (IXGBE_RXDADV_PKTTYPE_TCP | IXGBE_RXDADV_PKTTYPE_UDP)) == 0) {
		/*
		 * - Neither TCP nor UDP
		 * - IPv4 fragment
		 */
		return;
	}

	if ((staterr & (IXGBE_RXD_STAT_L4CS | IXGBE_RXDADV_ERR_TCPE)) ==
	    IXGBE_RXD_STAT_L4CS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
		    CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static __inline struct pktinfo *
ix_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t ptype)
{
	switch (hashtype) {
	case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case IXGBE_RXDADV_RSSTYPE_IPV4:
		if ((ptype & IXGBE_RXDADV_PKTTYPE_UDP) == 0) {
			/* Not UDP or is a fragment */
			return NULL;
		}
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_UDP;
		break;

	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}

static __inline void
ix_setup_rxdesc(union ixgbe_adv_rx_desc *rxd, const struct ix_rx_buf *rxbuf)
{
	rxd->read.pkt_addr = htole64(rxbuf->paddr);
	rxd->wb.upper.status_error = 0;
}
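
/*
 * NOTE: once a fragment of a frame is discarded, IX_RXRING_FLAG_DISC
 * stays set until the EOP fragment is seen, so the remaining
 * fragments of the same frame are discarded as well.
 */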

static void
ix_rx_discard(struct ix_rx_ring *rxr, int i, boolean_t eop)
{
	struct ix_rx_buf *rxbuf = &rxr->rx_buf[i];

	/*
	 * XXX discard may not be correct
	 */
	if (eop) {
		IFNET_STAT_INC(&rxr->rx_sc->arpcom.ac_if, ierrors, 1);
		rxr->rx_flags &= ~IX_RXRING_FLAG_DISC;
	} else {
		rxr->rx_flags |= IX_RXRING_FLAG_DISC;
	}
	if (rxbuf->fmp != NULL) {
		m_freem(rxbuf->fmp);
		rxbuf->fmp = NULL;
		rxbuf->lmp = NULL;
	}
	ix_setup_rxdesc(&rxr->rx_base[i], rxbuf);
}

static void
ix_rxeof(struct ix_rx_ring *rxr, int count)
{
	struct ifnet *ifp = &rxr->rx_sc->arpcom.ac_if;
	int i, nsegs = 0, cpuid = mycpuid;

	i = rxr->rx_next_check;
	while (count != 0) {
		struct ix_rx_buf *rxbuf, *nbuf = NULL;
		union ixgbe_adv_rx_desc *cur;
		struct mbuf *sendmp = NULL, *mp;
		struct pktinfo *pi = NULL, pi0;
		uint32_t rsc = 0, ptype, staterr, hash, hashtype;
		uint16_t len;
		boolean_t eop;

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		++nsegs;

		rxbuf = &rxr->rx_buf[i];
		mp = rxbuf->m_head;

		len = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hash = le32toh(cur->wb.lower.hi_dword.rss);
		hashtype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_RSSTYPE_MASK;

		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
		if (eop)
			--count;

		/*
		 * Make sure bad packets are discarded
		 */
		if ((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) ||
		    (rxr->rx_flags & IX_RXRING_FLAG_DISC)) {
			ix_rx_discard(rxr, i, eop);
			goto next_desc;
		}

		bus_dmamap_sync(rxr->rx_tag, rxbuf->map, BUS_DMASYNC_POSTREAD);
		if (ix_newbuf(rxr, i, FALSE) != 0) {
			ix_rx_discard(rxr, i, eop);
			goto next_desc;
		}

		/*
		 * On 82599, which supports hardware LRO (RSC), the
		 * fragments of a frame need not sit in sequential
		 * descriptors; the next descriptor index is encoded
		 * in the descriptor itself.  This also means that we
		 * may process more than one packet at a time, which
		 * required eliminating global chain pointers in favor
		 * of the per-buffer chaining done here.
		 */
		if (!eop) {
			int nextp;

			/*
			 * Figure out the next descriptor
			 * of this frame.
			 */
			if (rxr->rx_flags & IX_RXRING_FLAG_LRO)
				rsc = ix_rsc_count(cur);
			if (rsc) { /* Get hardware index */
				nextp = ((staterr &
				    IXGBE_RXDADV_NEXTP_MASK) >>
				    IXGBE_RXDADV_NEXTP_SHIFT);
			} else { /* Just sequential */
				nextp = i + 1;
				if (nextp == rxr->rx_ndesc)
					nextp = 0;
			}
			nbuf = &rxr->rx_buf[nextp];
			prefetch(nbuf);
		}
		mp->m_len = len;

		/*
		 * Rather than using the fmp/lmp global pointers
		 * we now keep the head of a packet chain in the
		 * buffer struct and pass this along from one
		 * descriptor to the next, until we get EOP.
		 */
		if (rxbuf->fmp == NULL) {
			mp->m_pkthdr.len = len;
			rxbuf->fmp = mp;
			rxbuf->lmp = mp;
		} else {
			rxbuf->fmp->m_pkthdr.len += len;
			rxbuf->lmp->m_next = mp;
			rxbuf->lmp = mp;
		}

		if (nbuf != NULL) {
			/*
			 * Not the last fragment of this frame,
			 * pass this fragment list on
			 */
			nbuf->fmp = rxbuf->fmp;
			nbuf->lmp = rxbuf->lmp;
		} else {
			/*
			 * Send this frame
			 */
			sendmp = rxbuf->fmp;

			sendmp->m_pkthdr.rcvif = ifp;
			IFNET_STAT_INC(ifp, ipackets, 1);
#ifdef IX_RSS_DEBUG
			rxr->rx_pkts++;
#endif

			/* Process vlan info */
			if (staterr & IXGBE_RXD_STAT_VP) {
				sendmp->m_pkthdr.ether_vlantag =
				    le16toh(cur->wb.upper.vlan);
				sendmp->m_flags |= M_VLANTAG;
			}
			if (ifp->if_capenable & IFCAP_RXCSUM)
				ix_rxcsum(staterr, sendmp, ptype);
			if (ifp->if_capenable & IFCAP_RSS) {
				pi = ix_rssinfo(sendmp, &pi0,
				    hash, hashtype, ptype);
			}
		}
		rxbuf->fmp = NULL;
		rxbuf->lmp = NULL;
next_desc:
		/* Advance our pointers to the next descriptor. */
		if (++i == rxr->rx_ndesc)
			i = 0;

		if (sendmp != NULL)
			ifp->if_input(ifp, sendmp, pi, cpuid);

		if (nsegs >= rxr->rx_wreg_nsegs) {
			ix_rx_refresh(rxr, i);
			nsegs = 0;
		}
	}
	rxr->rx_next_check = i;

	if (nsegs > 0)
		ix_rx_refresh(rxr, i);
}

static void
ix_set_vlan(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t ctrl;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
	} else {
		int i;

		/*
		 * On 82599 and later chips the VLAN enable is
		 * per queue in RXDCTL
		 */
		for (i = 0; i < sc->rx_ring_inuse; ++i) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
		}
	}
}
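
/*
 * NOTE: the interrupt handlers are re-enabled before EIMS is written
 * below, so an interrupt that fires as soon as the mask is set can be
 * serviced immediately.
 */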

static void
ix_enable_intr(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t fwsm;
	int i;

	for (i = 0; i < sc->intr_cnt; ++i)
		lwkt_serialize_handler_enable(sc->intr_data[i].intr_serialize);

	sc->intr_mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Enable Fan Failure detection */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		sc->intr_mask |= IXGBE_EIMS_GPI_SDP1;

	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		sc->intr_mask |= IXGBE_EIMS_ECC;
		sc->intr_mask |= IXGBE_EIMS_GPI_SDP0;
		sc->intr_mask |= IXGBE_EIMS_GPI_SDP1;
		sc->intr_mask |= IXGBE_EIMS_GPI_SDP2;
		break;

	case ixgbe_mac_X540:
		sc->intr_mask |= IXGBE_EIMS_ECC;
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			sc->intr_mask |= IXGBE_EIMS_TS;
		/* FALL THROUGH */
	default:
		break;
	}

	/* With MSI-X we use auto clear for RX and TX rings */
	if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
		/*
		 * There are no EIAC1/EIAC2 for newer chips; the related
		 * bits for TX and RX rings > 16 are always auto clear.
		 *
		 * XXX which bits?  There are _no_ documented EICR1 and
		 * EICR2 at all; only EICR.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
	} else {
		sc->intr_mask |= IX_TX_INTR_MASK | IX_RX0_INTR_MASK;

		KKASSERT(sc->rx_ring_inuse <= IX_MIN_RXRING_RSS);
		if (sc->rx_ring_inuse == IX_MIN_RXRING_RSS)
			sc->intr_mask |= IX_RX1_INTR_MASK;
	}

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, sc->intr_mask);

	/*
	 * Enable RX and TX rings for MSI-X
	 */
	if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
		for (i = 0; i < sc->tx_ring_inuse; ++i) {
			const struct ix_tx_ring *txr = &sc->tx_rings[i];

			if (txr->tx_intr_vec >= 0) {
				IXGBE_WRITE_REG(hw, txr->tx_eims,
				    txr->tx_eims_val);
			}
		}
		for (i = 0; i < sc->rx_ring_inuse; ++i) {
			const struct ix_rx_ring *rxr = &sc->rx_rings[i];

			KKASSERT(rxr->rx_intr_vec >= 0);
			IXGBE_WRITE_REG(hw, rxr->rx_eims, rxr->rx_eims_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);
}

static void
ix_disable_intr(struct ix_softc *sc)
{
	int i;

	if (sc->intr_type == PCI_INTR_TYPE_MSIX)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);

	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&sc->hw);

	for (i = 0; i < sc->intr_cnt; ++i)
		lwkt_serialize_handler_disable(sc->intr_data[i].intr_serialize);
}

uint16_t
ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
{
	return pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
	    reg, 2);
}

void
ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
{
	pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
	    reg, value, 2);
}
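
/*
 * ixgbe_read_pci_cfg()/ixgbe_write_pci_cfg() above are callbacks for
 * the shared ixgbe code; both access 16-bit PCI configuration words,
 * hence the fixed width of 2 bytes.
 */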

static void
ix_slot_info(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct ixgbe_mac_info *mac = &hw->mac;
	uint16_t link;
	uint32_t offset;

	/* For most devices simply call the shared code routine */
	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
		ixgbe_get_bus_info(hw);
		goto display;
	}

	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged.  A bit more work.
	 */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n",
	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n",
	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	offset = pci_get_pciecap_ptr(dev);
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINKSTAT, 2);
	switch (link & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	switch (link & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);
"Width x1" : "Unknown"); 2978 2979 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP && 2980 hw->bus.width <= ixgbe_bus_width_pcie_x4 && 2981 hw->bus.speed == ixgbe_bus_speed_2500) { 2982 device_printf(dev, "For optimal performance a x8 " 2983 "PCIE, or x4 PCIE Gen2 slot is required.\n"); 2984 } else if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP && 2985 hw->bus.width <= ixgbe_bus_width_pcie_x8 && 2986 hw->bus.speed < ixgbe_bus_speed_8000) { 2987 device_printf(dev, "For optimal performance a x8 " 2988 "PCIE Gen3 slot is required.\n"); 2989 } 2990 } 2991 2992 /* 2993 * TODO comment is incorrect 2994 * 2995 * Setup the correct IVAR register for a particular MSIX interrupt 2996 * - entry is the register array entry 2997 * - vector is the MSIX vector for this queue 2998 * - type is RX/TX/MISC 2999 */ 3000 static void 3001 ix_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, 3002 int8_t type) 3003 { 3004 struct ixgbe_hw *hw = &sc->hw; 3005 uint32_t ivar, index; 3006 3007 vector |= IXGBE_IVAR_ALLOC_VAL; 3008 3009 switch (hw->mac.type) { 3010 case ixgbe_mac_82598EB: 3011 if (type == -1) 3012 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3013 else 3014 entry += (type * 64); 3015 index = (entry >> 2) & 0x1F; 3016 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3017 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3018 ivar |= (vector << (8 * (entry & 0x3))); 3019 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 3020 break; 3021 3022 case ixgbe_mac_82599EB: 3023 case ixgbe_mac_X540: 3024 if (type == -1) { /* MISC IVAR */ 3025 index = (entry & 1) * 8; 3026 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3027 ivar &= ~(0xFF << index); 3028 ivar |= (vector << index); 3029 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3030 } else { /* RX/TX IVARS */ 3031 index = (16 * (entry & 1)) + (8 * type); 3032 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3033 ivar &= ~(0xFF << index); 3034 ivar |= (vector << index); 3035 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3036 } 3037 3038 default: 3039 break; 3040 } 3041 } 3042 3043 static boolean_t 3044 ix_sfp_probe(struct ix_softc *sc) 3045 { 3046 struct ixgbe_hw *hw = &sc->hw; 3047 3048 if (hw->phy.type == ixgbe_phy_nl && 3049 hw->phy.sfp_type == ixgbe_sfp_type_not_present) { 3050 int32_t ret; 3051 3052 ret = hw->phy.ops.identify_sfp(hw); 3053 if (ret) 3054 return FALSE; 3055 3056 ret = hw->phy.ops.reset(hw); 3057 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3058 if_printf(&sc->arpcom.ac_if, 3059 "Unsupported SFP+ module detected! 
" 3060 "Reload driver with supported module.\n"); 3061 sc->sfp_probe = FALSE; 3062 return FALSE; 3063 } 3064 if_printf(&sc->arpcom.ac_if, "SFP+ module detected!\n"); 3065 3066 /* We now have supported optics */ 3067 sc->sfp_probe = FALSE; 3068 /* Set the optics type so system reports correctly */ 3069 ix_setup_optics(sc); 3070 3071 return TRUE; 3072 } 3073 return FALSE; 3074 } 3075 3076 static void 3077 ix_handle_link(struct ix_softc *sc) 3078 { 3079 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0); 3080 ix_update_link_status(sc); 3081 } 3082 3083 /* 3084 * Handling SFP module 3085 */ 3086 static void 3087 ix_handle_mod(struct ix_softc *sc) 3088 { 3089 struct ixgbe_hw *hw = &sc->hw; 3090 uint32_t err; 3091 3092 err = hw->phy.ops.identify_sfp(hw); 3093 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3094 if_printf(&sc->arpcom.ac_if, 3095 "Unsupported SFP+ module type was detected.\n"); 3096 return; 3097 } 3098 err = hw->mac.ops.setup_sfp(hw); 3099 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3100 if_printf(&sc->arpcom.ac_if, 3101 "Setup failure - unsupported SFP+ module type.\n"); 3102 return; 3103 } 3104 ix_handle_msf(sc); 3105 } 3106 3107 /* 3108 * Handling MSF (multispeed fiber) 3109 */ 3110 static void 3111 ix_handle_msf(struct ix_softc *sc) 3112 { 3113 struct ixgbe_hw *hw = &sc->hw; 3114 uint32_t autoneg; 3115 3116 autoneg = hw->phy.autoneg_advertised; 3117 if (!autoneg && hw->mac.ops.get_link_capabilities != NULL) { 3118 bool negotiate; 3119 3120 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3121 } 3122 if (hw->mac.ops.setup_link != NULL) 3123 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3124 } 3125 3126 static void 3127 ix_update_stats(struct ix_softc *sc) 3128 { 3129 struct ifnet *ifp = &sc->arpcom.ac_if; 3130 struct ixgbe_hw *hw = &sc->hw; 3131 uint32_t missed_rx = 0, bprc, lxon, lxoff, total; 3132 uint64_t total_missed_rx = 0; 3133 int i; 3134 3135 sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3136 sc->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 3137 sc->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 3138 sc->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 3139 3140 /* 3141 * Note: These are for the 8 possible traffic classes, which 3142 * in current implementation is unused, therefore only 0 should 3143 * read real data. 

static void
ix_update_stats(struct ix_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t missed_rx = 0, bprc, lxon, lxoff, total;
	uint64_t total_missed_rx = 0;
	int i;

	sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	sc->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	sc->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	sc->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	/*
	 * Note: These are for the 8 possible traffic classes, which
	 * the current implementation does not use, therefore only
	 * index 0 should read real data.
	 */
	for (i = 0; i < 8; i++) {
		uint32_t mp;

		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* missed_rx tallies misses for the gprc workaround */
		missed_rx += mp;
		/* global total per queue */
		sc->stats.mpc[i] += mp;

		/* Running comprehensive total for stats display */
		total_missed_rx += sc->stats.mpc[i];

		if (hw->mac.type == ixgbe_mac_82598EB) {
			sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
			sc->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
			sc->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			sc->stats.pxonrxc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
		} else {
			sc->stats.pxonrxc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
		}
		sc->stats.pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		sc->stats.pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		sc->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		sc->stats.pxon2offc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	}
	for (i = 0; i < 16; i++) {
		sc->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		sc->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		sc->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	sc->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	sc->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	sc->stats.gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
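
	/*
	 * NOTE: on 82599/X540 the octet counters are split across
	 * low/high register pairs, while 82598 only maintains the
	 * high register, hence the asymmetric handling above.
	 */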

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	sc->stats.bprc += bprc;
	sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		sc->stats.mprc -= bprc;

	sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	sc->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	sc->stats.lxofftxc += lxoff;
	total = lxon + lxoff;

	sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	sc->stats.gptc -= total;
	sc->stats.mptc -= total;
	sc->stats.ptc64 -= total;
	sc->stats.gotc -= total * ETHER_MIN_LEN;

	sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	sc->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	sc->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	sc->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	sc->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	sc->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	sc->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	sc->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		sc->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		sc->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		sc->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		sc->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		sc->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Rx Errors */
	IFNET_STAT_SET(ifp, iqdrops, total_missed_rx);
	IFNET_STAT_SET(ifp, ierrors, sc->stats.crcerrs + sc->stats.rlec);
}

#if 0
/*
 * Add sysctl variables, one per statistic, to the system.
 */
static void
ix_add_hw_stats(struct ix_softc *sc)
{
	device_t dev = sc->dev;

	struct ix_tx_ring *txr = sc->tx_rings;
	struct ix_rx_ring *rxr = sc->rx_rings;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbe_hw_stats *stats = &sc->stats;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char namebuf[QUEUE_NAME_LEN];

	/* MAC stats get their own sub node */

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD, NULL, "MAC Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");

	/* Flow Control stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
"256-511 byte frames received"); 3364 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 3365 CTLFLAG_RD, &stats->prc1023, 3366 "512-1023 byte frames received"); 3367 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 3368 CTLFLAG_RD, &stats->prc1522, 3369 "1023-1522 byte frames received"); 3370 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 3371 CTLFLAG_RD, &stats->ruc, 3372 "Receive Undersized"); 3373 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 3374 CTLFLAG_RD, &stats->rfc, 3375 "Fragmented Packets Received "); 3376 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 3377 CTLFLAG_RD, &stats->roc, 3378 "Oversized Packets Received"); 3379 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 3380 CTLFLAG_RD, &stats->rjc, 3381 "Received Jabber"); 3382 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 3383 CTLFLAG_RD, &stats->mngprc, 3384 "Management Packets Received"); 3385 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 3386 CTLFLAG_RD, &stats->mngptc, 3387 "Management Packets Dropped"); 3388 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 3389 CTLFLAG_RD, &stats->xec, 3390 "Checksum Errors"); 3391 3392 /* Packet Transmission Stats */ 3393 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 3394 CTLFLAG_RD, &stats->gotc, 3395 "Good Octets Transmitted"); 3396 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 3397 CTLFLAG_RD, &stats->tpt, 3398 "Total Packets Transmitted"); 3399 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 3400 CTLFLAG_RD, &stats->gptc, 3401 "Good Packets Transmitted"); 3402 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 3403 CTLFLAG_RD, &stats->bptc, 3404 "Broadcast Packets Transmitted"); 3405 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 3406 CTLFLAG_RD, &stats->mptc, 3407 "Multicast Packets Transmitted"); 3408 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 3409 CTLFLAG_RD, &stats->mngptc, 3410 "Management Packets Transmitted"); 3411 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 3412 CTLFLAG_RD, &stats->ptc64, 3413 "64 byte frames transmitted "); 3414 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 3415 CTLFLAG_RD, &stats->ptc127, 3416 "65-127 byte frames transmitted"); 3417 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 3418 CTLFLAG_RD, &stats->ptc255, 3419 "128-255 byte frames transmitted"); 3420 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 3421 CTLFLAG_RD, &stats->ptc511, 3422 "256-511 byte frames transmitted"); 3423 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 3424 CTLFLAG_RD, &stats->ptc1023, 3425 "512-1023 byte frames transmitted"); 3426 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 3427 CTLFLAG_RD, &stats->ptc1522, 3428 "1024-1522 byte frames transmitted"); 3429 } 3430 #endif 3431 3432 /* 3433 * Enable the hardware to drop packets when the buffer is full. 3434 * This is useful when multiple RX rings are used, so that no 3435 * single RX ring being full stalls the entire RX engine. We 3436 * only enable this when multiple RX rings are used and when 3437 * flow control is disabled. 

/*
 * Enable the hardware to drop packets when the buffer is full.
 * This is useful when multiple RX rings are used, so that no
 * single RX ring being full stalls the entire RX engine.  We
 * only enable this when multiple RX rings are used and when
 * flow control is disabled.
 */
static void
ix_enable_rx_drop(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int i;

	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if,
		    "flow control %d, enable RX drop\n", sc->fc);
	}

	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		uint32_t srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));

		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
	}
}

static void
ix_disable_rx_drop(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int i;

	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if,
		    "flow control %d, disable RX drop\n", sc->fc);
	}

	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		uint32_t srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));

		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
	}
}

static int
ix_sysctl_flowctrl(SYSCTL_HANDLER_ARGS)
{
	struct ix_softc *sc = (struct ix_softc *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, fc;

	fc = sc->fc;
	error = sysctl_handle_int(oidp, &fc, 0, req);
	if (error || req->newptr == NULL)
		return error;

	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
	case ixgbe_fc_none:
		break;
	default:
		return EINVAL;
	}

	ifnet_serialize_all(ifp);

	/* Don't bother if it's not changed */
	if (sc->fc == fc)
		goto done;
	sc->fc = fc;

	/* Don't do anything, if the interface is not up yet */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	if (sc->rx_ring_inuse > 1) {
		switch (sc->fc) {
		case ixgbe_fc_rx_pause:
		case ixgbe_fc_tx_pause:
		case ixgbe_fc_full:
			ix_disable_rx_drop(sc);
			break;

		case ixgbe_fc_none:
			ix_enable_rx_drop(sc);
			break;

		default:
			panic("leading fc check mismatch");
		}
	}

	sc->hw.fc.requested_mode = sc->fc;
	/* Don't autoneg if forcing a value */
	sc->hw.fc.disable_fc_autoneg = TRUE;
	ixgbe_fc_enable(&sc->hw);

done:
	ifnet_deserialize_all(ifp);
	return error;
}
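
/*
 * Example usage of the handler above (illustrative): writing 3
 * (ixgbe_fc_full) to the "flowctrl" sysctl node requests full flow
 * control; writing 0 (ixgbe_fc_none) disables it and, with multiple
 * RX rings in use, re-enables per-ring RX drop via
 * ix_enable_rx_drop().
 */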

#ifdef foo
/* XXX not working properly w/ 82599 connected w/ DAC */
/* XXX only work after the interface is up */
static int
ix_sysctl_advspeed(SYSCTL_HANDLER_ARGS)
{
	struct ix_softc *sc = (struct ix_softc *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ixgbe_hw *hw = &sc->hw;
	ixgbe_link_speed speed;
	int error, advspeed;

	advspeed = sc->advspeed;
	error = sysctl_handle_int(oidp, &advspeed, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (!(hw->phy.media_type == ixgbe_media_type_copper ||
	    hw->phy.multispeed_fiber))
		return EOPNOTSUPP;
	if (hw->mac.ops.setup_link == NULL)
		return EOPNOTSUPP;

	switch (advspeed) {
	case 0:	/* auto */
		speed = IXGBE_LINK_SPEED_UNKNOWN;
		break;

	case 1:	/* 1Gb */
		speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;

	case 2:	/* 100Mb */
		speed = IXGBE_LINK_SPEED_100_FULL;
		break;

	case 3:	/* 1Gb/10Gb */
		speed = IXGBE_LINK_SPEED_1GB_FULL |
		    IXGBE_LINK_SPEED_10GB_FULL;
		break;

	default:
		return EINVAL;
	}

	ifnet_serialize_all(ifp);

	if (sc->advspeed == advspeed) /* no change */
		goto done;

	if ((speed & IXGBE_LINK_SPEED_100_FULL) &&
	    hw->mac.type != ixgbe_mac_X540) {
		error = EOPNOTSUPP;
		goto done;
	}

	sc->advspeed = advspeed;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		ix_config_link(sc);
	} else {
		hw->mac.autotry_restart = TRUE;
		hw->mac.ops.setup_link(hw, speed, sc->link_up);
	}

done:
	ifnet_deserialize_all(ifp);
	return error;
}
#endif

static void
ix_setup_serialize(struct ix_softc *sc)
{
	int i = 0, j;

	/* Main + RX + TX */
	sc->nserialize = 1 + sc->rx_ring_cnt + sc->tx_ring_cnt;
	sc->serializes =
	    kmalloc(sc->nserialize * sizeof(struct lwkt_serialize *),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializes
	 *
	 * NOTE: Order is critical
	 */

	KKASSERT(i < sc->nserialize);
	sc->serializes[i++] = &sc->main_serialize;

	for (j = 0; j < sc->rx_ring_cnt; ++j) {
		KKASSERT(i < sc->nserialize);
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
	}

	for (j = 0; j < sc->tx_ring_cnt; ++j) {
		KKASSERT(i < sc->nserialize);
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
	}

	KKASSERT(i == sc->nserialize);
}

static int
ix_alloc_intr(struct ix_softc *sc)
{
	struct ix_intr_data *intr;
	u_int intr_flags;

	ix_alloc_msix(sc);
	if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
		ix_set_ring_inuse(sc, FALSE);
		return 0;
	}

	if (sc->intr_data != NULL)
		kfree(sc->intr_data, M_DEVBUF);

	sc->intr_cnt = 1;
	sc->intr_data = kmalloc(sizeof(struct ix_intr_data), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	intr = &sc->intr_data[0];

	/*
	 * Allocate MSI/legacy interrupt resource
	 */
	sc->intr_type = pci_alloc_1intr(sc->dev, ix_msi_enable,
	    &intr->intr_rid, &intr_flags);

	intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &intr->intr_rid, intr_flags);
	if (intr->intr_res == NULL) {
		device_printf(sc->dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return ENXIO;
	}

	intr->intr_serialize = &sc->main_serialize;
	intr->intr_cpuid = rman_get_cpuid(intr->intr_res);
	intr->intr_func = ix_intr;
	intr->intr_funcarg = sc;
	intr->intr_rate = IX_INTR_RATE;
	intr->intr_use = IX_INTR_USE_RXTX;

	sc->tx_rings[0].tx_intr_cpuid = intr->intr_cpuid;
	sc->tx_rings[0].tx_intr_vec = IX_TX_INTR_VEC;

	sc->rx_rings[0].rx_intr_vec = IX_RX0_INTR_VEC;

	ix_set_ring_inuse(sc, FALSE);

	KKASSERT(sc->rx_ring_inuse <= IX_MIN_RXRING_RSS);
	if (sc->rx_ring_inuse == IX_MIN_RXRING_RSS)
		sc->rx_rings[1].rx_intr_vec = IX_RX1_INTR_VEC;

	return 0;
}

static void
ix_free_intr(struct ix_softc *sc)
{
	if (sc->intr_data == NULL)
		return;

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		struct ix_intr_data *intr = &sc->intr_data[0];

		KKASSERT(sc->intr_cnt == 1);
		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (sc->intr_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->dev);

		kfree(sc->intr_data, M_DEVBUF);
	} else {
		ix_free_msix(sc, TRUE);
	}
}
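
/*
 * NOTE: ring usage is recomputed whenever the interrupt setup
 * changes: with MSI or a legacy interrupt, RSS is capped at
 * IX_MIN_RXRING_RSS RX rings and TX is reduced to a single ring,
 * as implemented by ix_get_rxring_inuse()/ix_get_txring_inuse()
 * below.
 */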

static void
ix_set_ring_inuse(struct ix_softc *sc, boolean_t polling)
{
	sc->rx_ring_inuse = ix_get_rxring_inuse(sc, polling);
	sc->tx_ring_inuse = ix_get_txring_inuse(sc, polling);
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if,
		    "RX rings %d/%d, TX rings %d/%d\n",
		    sc->rx_ring_inuse, sc->rx_ring_cnt,
		    sc->tx_ring_inuse, sc->tx_ring_cnt);
	}
}

static int
ix_get_rxring_inuse(const struct ix_softc *sc, boolean_t polling)
{
	if (!IX_ENABLE_HWRSS(sc))
		return 1;

	if (polling)
		return sc->rx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IX_MIN_RXRING_RSS;
	else
		return sc->rx_ring_msix;
}

static int
ix_get_txring_inuse(const struct ix_softc *sc, boolean_t polling)
{
	if (!IX_ENABLE_HWTSS(sc))
		return 1;

	if (polling)
		return sc->tx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return 1;
	else
		return sc->tx_ring_msix;
}

static int
ix_setup_intr(struct ix_softc *sc)
{
	int i;

	for (i = 0; i < sc->intr_cnt; ++i) {
		struct ix_intr_data *intr = &sc->intr_data[i];
		int error;

		error = bus_setup_intr_descr(sc->dev, intr->intr_res,
		    INTR_MPSAFE, intr->intr_func, intr->intr_funcarg,
		    &intr->intr_hand, intr->intr_serialize, intr->intr_desc);
		if (error) {
			device_printf(sc->dev, "can't setup %dth intr\n", i);
			ix_teardown_intr(sc, i);
			return error;
		}
	}
	return 0;
}

static void
ix_teardown_intr(struct ix_softc *sc, int intr_cnt)
{
	int i;

	if (sc->intr_data == NULL)
		return;

	for (i = 0; i < intr_cnt; ++i) {
		struct ix_intr_data *intr = &sc->intr_data[i];

		bus_teardown_intr(sc->dev, intr->intr_res, intr->intr_hand);
	}
}

static void
ix_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct ix_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, sc->nserialize, slz);
}

static void
ix_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct ix_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, sc->nserialize, slz);
}

static int
ix_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct ix_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, sc->nserialize, slz);
}

#ifdef INVARIANTS

static void
ix_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct ix_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, sc->nserialize, slz,
	    serialized);
}

#endif	/* INVARIANTS */

static void
ix_free_rings(struct ix_softc *sc)
{
	int i;

	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct ix_tx_ring *txr = &sc->tx_rings[i];

			ix_destroy_tx_ring(txr, txr->tx_ndesc);
		}
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct ix_rx_ring *rxr = &sc->rx_rings[i];

			ix_destroy_rx_ring(rxr, rxr->rx_ndesc);
		}
		kfree(sc->rx_rings, M_DEVBUF);
	}

	if (sc->parent_tag != NULL)
		bus_dma_tag_destroy(sc->parent_tag);
}
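
/*
 * NOTE: the watchdog below runs per ifaltq subqueue; ifsq_get_priv()
 * returns the TX ring that was attached to the subqueue during
 * attach, as asserted by the KKASSERT.
 */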

static void
ix_watchdog(struct ifaltq_subque *ifsq)
{
	struct ix_tx_ring *txr = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct ix_softc *sc = ifp->if_softc;
	int i;

	KKASSERT(txr->tx_ifsq == ifsq);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If the interface has been paused then don't do the watchdog check
	 */
	if (IXGBE_READ_REG(&sc->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
		txr->tx_watchdog.wd_timer = 5;
		return;
	}

	if_printf(ifp, "Watchdog timeout -- resetting\n");
	if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->tx_idx,
	    IXGBE_READ_REG(&sc->hw, IXGBE_TDH(txr->tx_idx)),
	    IXGBE_READ_REG(&sc->hw, IXGBE_TDT(txr->tx_idx)));
	if_printf(ifp, "TX(%d) desc avail = %d, next TX to Clean = %d\n",
	    txr->tx_idx, txr->tx_avail, txr->tx_next_clean);

	ix_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].tx_ifsq);
}

static void
ix_free_tx_ring(struct ix_tx_ring *txr)
{
	int i;

	for (i = 0; i < txr->tx_ndesc; ++i) {
		struct ix_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
	}
}

static void
ix_free_rx_ring(struct ix_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->rx_ndesc; ++i) {
		struct ix_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->fmp != NULL) {
			m_freem(rxbuf->fmp);
			rxbuf->fmp = NULL;
			rxbuf->lmp = NULL;
		} else {
			KKASSERT(rxbuf->lmp == NULL);
		}
		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}
}

static int
ix_newbuf(struct ix_rx_ring *rxr, int i, boolean_t wait)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct ix_rx_buf *rxbuf;
	int flags, error, nseg;

	flags = M_NOWAIT;
	if (__predict_false(wait))
		flags = M_WAITOK;

	m = m_getjcl(flags, MT_DATA, M_PKTHDR, rxr->rx_mbuf_sz);
	if (m == NULL) {
		if (wait) {
			if_printf(&rxr->rx_sc->arpcom.ac_if,
			    "Unable to allocate RX mbuf\n");
		}
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = rxr->rx_mbuf_sz;

	error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
	    rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&rxr->rx_sc->arpcom.ac_if,
			    "Unable to load RX mbuf\n");
		}
		return error;
	}

	rxbuf = &rxr->rx_buf[i];
	if (rxbuf->m_head != NULL)
		bus_dmamap_unload(rxr->rx_tag, rxbuf->map);

	map = rxbuf->map;
	rxbuf->map = rxr->rx_sparemap;
	rxr->rx_sparemap = map;

	rxbuf->m_head = m;
	rxbuf->paddr = seg.ds_addr;

	ix_setup_rxdesc(&rxr->rx_base[i], rxbuf);
	return 0;
}
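
/*
 * NOTE: ix_newbuf() above loads the new mbuf through rx_sparemap and
 * swaps maps only on success, so the ring slot always keeps a valid
 * DMA map and the old mbuf stays in place if the load fails.
 */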
static void
ix_add_sysctl(struct ix_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->dev);
#ifdef IX_RSS_DEBUG
	char node[32];
	int i;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLTYPE_INT | CTLFLAG_RD,
	    sc, 0, ix_sysctl_rxd, "I",
	    "# of RX descs");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLTYPE_INT | CTLFLAG_RD,
	    sc, 0, ix_sysctl_txd, "I",
	    "# of TX descs");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ix_sysctl_tx_wreg_nsegs, "I",
	    "# of segments sent before write to hardware register");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ix_sysctl_rx_wreg_nsegs, "I",
	    "# of segments received before write to hardware register");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ix_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ix_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ix_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif

#define IX_ADD_INTR_RATE_SYSCTL(sc, use, name) \
do { \
	ix_add_intr_rate_sysctl(sc, IX_INTR_USE_##use, #name, \
	    ix_sysctl_##name, #use " interrupt rate"); \
} while (0)

	IX_ADD_INTR_RATE_SYSCTL(sc, RXTX, rxtx_intr_rate);
	IX_ADD_INTR_RATE_SYSCTL(sc, RX, rx_intr_rate);
	IX_ADD_INTR_RATE_SYSCTL(sc, TX, tx_intr_rate);
	IX_ADD_INTR_RATE_SYSCTL(sc, STATUS, sts_intr_rate);

#undef IX_ADD_INTR_RATE_SYSCTL

#ifdef IX_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
	    "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx,
		    SYSCTL_CHILDREN(tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_pkts, "RXed packets");
	}
#endif

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "flowctrl", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ix_sysctl_flowctrl, "I",
	    "flow control, 0 - off, 1 - rx pause, 2 - tx pause, 3 - full");
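	/*
	 * Usage note (added; the node path is assumed for illustration):
	 * with the mapping above, e.g.
	 *
	 *	sysctl dev.ix.0.flowctrl=3
	 *
	 * would request full (RX and TX pause) flow control on unit 0,
	 * while 0 turns flow control off entirely.
	 */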
#ifdef foo
	/*
	 * Allow a kind of speed control by forcing the autoneg
	 * advertised speed list to only a certain value; this
	 * supports 1G on 82599 devices and 100Mb on X540.
	 */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "advspeed", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ix_sysctl_advspeed, "I",
	    "advertised link speed, "
	    "0 - auto, 1 - 1Gb, 2 - 100Mb, 3 - 1Gb/10Gb");
#endif

#if 0
	ix_add_hw_stats(sc);
#endif
}

static int
ix_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct ix_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->tx_rings[0].tx_wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (nsegs < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_rings[i].tx_wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

static int
ix_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct ix_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->rx_rings[0].rx_wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (nsegs < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		sc->rx_rings[i].rx_wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

static int
ix_sysctl_txd(SYSCTL_HANDLER_ARGS)
{
	struct ix_softc *sc = (void *)arg1;
	int txd;

	txd = sc->tx_rings[0].tx_ndesc;
	return sysctl_handle_int(oidp, &txd, 0, req);
}

static int
ix_sysctl_rxd(SYSCTL_HANDLER_ARGS)
{
	struct ix_softc *sc = (void *)arg1;
	int rxd;

	rxd = sc->rx_rings[0].rx_ndesc;
	return sysctl_handle_int(oidp, &rxd, 0, req);
}

static int
ix_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct ix_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ix_tx_ring *txr = &sc->tx_rings[0];
	int error, nsegs;

	nsegs = txr->tx_intr_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (nsegs < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	if (nsegs >= txr->tx_ndesc - IX_MAX_SCATTER - IX_TX_RESERVED) {
		error = EINVAL;
	} else {
		int i;

		error = 0;
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			sc->tx_rings[i].tx_intr_nsegs = nsegs;
	}

	ifnet_deserialize_all(ifp);

	return error;
}

static void
ix_set_eitr(struct ix_softc *sc, int idx, int rate)
{
	uint32_t eitr, eitr_intvl;

	eitr = IXGBE_READ_REG(&sc->hw, IXGBE_EITR(idx));
	eitr_intvl = 1000000000 / 256 / rate;

	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		eitr &= ~IX_EITR_INTVL_MASK_82598;
		if (eitr_intvl == 0)
			eitr_intvl = 1;
		else if (eitr_intvl > IX_EITR_INTVL_MASK_82598)
			eitr_intvl = IX_EITR_INTVL_MASK_82598;
	} else {
		eitr &= ~IX_EITR_INTVL_MASK;

		eitr_intvl &= ~IX_EITR_INTVL_RSVD_MASK;
		if (eitr_intvl == 0)
			eitr_intvl = IX_EITR_INTVL_MIN;
		else if (eitr_intvl > IX_EITR_INTVL_MAX)
			eitr_intvl = IX_EITR_INTVL_MAX;
	}
	eitr |= eitr_intvl;

	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(idx), eitr);
}
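/*
 * Worked example (added): ix_set_eitr() converts an interrupt rate into
 * the EITR interval field, which counts in 256 ns units:
 *
 *	eitr_intvl = 10^9 / 256 / rate
 *
 * e.g. rate = 8000 intr/s gives 1000000000 / 256 / 8000 = 488 units,
 * i.e. roughly 488 * 256 ns = 125 us between interrupts; the result is
 * then clamped to the per-MAC minimum/maximum above.
 */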
static int
ix_sysctl_rxtx_intr_rate(SYSCTL_HANDLER_ARGS)
{
	return ix_sysctl_intr_rate(oidp, arg1, arg2, req, IX_INTR_USE_RXTX);
}

static int
ix_sysctl_rx_intr_rate(SYSCTL_HANDLER_ARGS)
{
	return ix_sysctl_intr_rate(oidp, arg1, arg2, req, IX_INTR_USE_RX);
}

static int
ix_sysctl_tx_intr_rate(SYSCTL_HANDLER_ARGS)
{
	return ix_sysctl_intr_rate(oidp, arg1, arg2, req, IX_INTR_USE_TX);
}

static int
ix_sysctl_sts_intr_rate(SYSCTL_HANDLER_ARGS)
{
	return ix_sysctl_intr_rate(oidp, arg1, arg2, req, IX_INTR_USE_STATUS);
}

static int
ix_sysctl_intr_rate(SYSCTL_HANDLER_ARGS, int use)
{
	struct ix_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, rate, i;

	rate = 0;
	for (i = 0; i < sc->intr_cnt; ++i) {
		if (sc->intr_data[i].intr_use == use) {
			rate = sc->intr_data[i].intr_rate;
			break;
		}
	}

	error = sysctl_handle_int(oidp, &rate, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (rate <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	for (i = 0; i < sc->intr_cnt; ++i) {
		if (sc->intr_data[i].intr_use == use) {
			sc->intr_data[i].intr_rate = rate;
			if (ifp->if_flags & IFF_RUNNING)
				ix_set_eitr(sc, i, rate);
		}
	}

	ifnet_deserialize_all(ifp);

	return error;
}

static void
ix_add_intr_rate_sysctl(struct ix_softc *sc, int use,
    const char *name, int (*handler)(SYSCTL_HANDLER_ARGS), const char *desc)
{
	int i;

	for (i = 0; i < sc->intr_cnt; ++i) {
		if (sc->intr_data[i].intr_use == use) {
			SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
			    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
			    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW,
			    sc, 0, handler, "I", desc);
			break;
		}
	}
}

static void
ix_set_timer_cpuid(struct ix_softc *sc, boolean_t polling)
{
	if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
		sc->timer_cpuid = 0; /* XXX fixed */
	else
		sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res);
}
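/*
 * Added sketch: ix_alloc_msix() below first rounds the advertised MSI-X
 * vector count down to a power of two.  A standalone model of that loop
 * (names are illustrative only):
 */
#if 0
static int
round_down_pow2(int msix_cnt)
{
	int i = 0;

	/* Find the largest i with 2^i <= msix_cnt. */
	while ((1 << (i + 1)) <= msix_cnt)
		++i;
	return 1 << i;		/* e.g. 24 -> 16, 16 -> 16, 3 -> 2 */
}
#endif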
static void
ix_alloc_msix(struct ix_softc *sc)
{
	int msix_enable, msix_cnt, msix_cnt2, alloc_cnt;
	struct ix_intr_data *intr;
	int i, x, error;
	int offset, offset_def, agg_rxtx, ring_max;
	boolean_t aggregate, setup = FALSE;

	msix_enable = ix_msix_enable;
	/*
	 * Don't enable MSI-X on 82598 by default, see:
	 * 82598 specification update errata #38
	 */
	if (sc->hw.mac.type == ixgbe_mac_82598EB)
		msix_enable = 0;
	msix_enable = device_getenv_int(sc->dev, "msix.enable", msix_enable);
	if (!msix_enable)
		return;

	msix_cnt = pci_msix_count(sc->dev);
#ifdef IX_MSIX_DEBUG
	msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt);
#endif
	if (msix_cnt <= 1) {
		/* A single MSI-X vector does not make sense */
		return;
	}

	i = 0;
	while ((1 << (i + 1)) <= msix_cnt)
		++i;
	msix_cnt2 = 1 << i;

	if (bootverbose) {
		device_printf(sc->dev, "MSI-X count %d/%d\n",
		    msix_cnt2, msix_cnt);
	}

	KKASSERT(msix_cnt >= msix_cnt2);
	if (msix_cnt == msix_cnt2) {
		/* We need at least one MSI-X for link status */
		msix_cnt2 >>= 1;
		if (msix_cnt2 <= 1) {
			/* One MSI-X for RX/TX does not make sense */
			device_printf(sc->dev, "not enough MSI-X for TX/RX, "
			    "MSI-X count %d/%d\n", msix_cnt2, msix_cnt);
			return;
		}
		KKASSERT(msix_cnt > msix_cnt2);

		if (bootverbose) {
			device_printf(sc->dev, "MSI-X count eq fixup %d/%d\n",
			    msix_cnt2, msix_cnt);
		}
	}

	/*
	 * Make sure that we don't exceed the limit imposed by the
	 * interrupt related registers (EIMS, etc).
	 *
	 * NOTE: msix_cnt > msix_cnt2 when we reach here.
	 */
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		if (msix_cnt2 > IX_MAX_MSIX_82598)
			msix_cnt2 = IX_MAX_MSIX_82598;
	} else {
		if (msix_cnt2 > IX_MAX_MSIX)
			msix_cnt2 = IX_MAX_MSIX;
	}
	msix_cnt = msix_cnt2 + 1;	/* +1 for status */

	if (bootverbose) {
		device_printf(sc->dev, "MSI-X count max fixup %d/%d\n",
		    msix_cnt2, msix_cnt);
	}

	sc->rx_ring_msix = sc->rx_ring_cnt;
	if (sc->rx_ring_msix > msix_cnt2)
		sc->rx_ring_msix = msix_cnt2;

	sc->tx_ring_msix = sc->tx_ring_cnt;
	if (sc->tx_ring_msix > msix_cnt2)
		sc->tx_ring_msix = msix_cnt2;

	ring_max = sc->rx_ring_msix;
	if (ring_max < sc->tx_ring_msix)
		ring_max = sc->tx_ring_msix;

	/* Allow user to force independent RX/TX MSI-X handling */
	agg_rxtx = device_getenv_int(sc->dev, "msix.agg_rxtx",
	    ix_msix_agg_rxtx);

	if (!agg_rxtx && msix_cnt >= sc->tx_ring_msix + sc->rx_ring_msix + 1) {
		/*
		 * Independent TX/RX MSI-X
		 */
		aggregate = FALSE;
		if (bootverbose)
			device_printf(sc->dev, "independent TX/RX MSI-X\n");
		alloc_cnt = sc->tx_ring_msix + sc->rx_ring_msix;
	} else {
		/*
		 * Aggregate TX/RX MSI-X
		 */
		aggregate = TRUE;
		if (bootverbose)
			device_printf(sc->dev, "aggregate TX/RX MSI-X\n");
		alloc_cnt = msix_cnt2;
		if (alloc_cnt > ring_max)
			alloc_cnt = ring_max;
		KKASSERT(alloc_cnt >= sc->rx_ring_msix &&
		    alloc_cnt >= sc->tx_ring_msix);
	}
	++alloc_cnt;	/* For status */

	if (bootverbose) {
		device_printf(sc->dev, "MSI-X alloc %d, "
		    "RX ring %d, TX ring %d\n", alloc_cnt,
		    sc->rx_ring_msix, sc->tx_ring_msix);
	}

	sc->msix_mem_rid = PCIR_BAR(IX_MSIX_BAR_82598);
	sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_mem_rid, RF_ACTIVE);
	if (sc->msix_mem_res == NULL) {
		sc->msix_mem_rid = PCIR_BAR(IX_MSIX_BAR_82599);
		sc->msix_mem_res = bus_alloc_resource_any(sc->dev,
		    SYS_RES_MEMORY, &sc->msix_mem_rid, RF_ACTIVE);
		if (sc->msix_mem_res == NULL) {
			device_printf(sc->dev, "Unable to map MSI-X table\n");
			return;
		}
	}

	sc->intr_cnt = alloc_cnt;
	sc->intr_data = kmalloc(sizeof(struct ix_intr_data) * sc->intr_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (x = 0; x < sc->intr_cnt; ++x) {
		intr = &sc->intr_data[x];
		intr->intr_rid = -1;
		intr->intr_rate = IX_INTR_RATE;
	}

	x = 0;
	if (!aggregate) {
		/*
		 * RX rings
		 */
		if (sc->rx_ring_msix == ncpus2) {
			offset = 0;
		} else {
			offset_def = (sc->rx_ring_msix *
			    device_get_unit(sc->dev)) % ncpus2;

			offset = device_getenv_int(sc->dev,
			    "msix.rxoff", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->rx_ring_msix != 0) {
				device_printf(sc->dev,
				    "invalid msix.rxoff %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}
		ix_conf_rx_msix(sc, 0, &x, offset);
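		/*
		 * Added note on the offset scheme: offset_def spreads
		 * different units across CPUs.  With 2 RX rings, unit 1
		 * and ncpus2 == 4, offset_def = (2 * 1) % 4 = 2, so this
		 * unit's RX vectors land on cpu2 and cpu3 while unit 0
		 * uses cpu0 and cpu1.  A user-supplied msix.rxoff or
		 * msix.txoff must stay below ncpus2 and be a multiple of
		 * the ring count, otherwise the default is used.
		 */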
		/*
		 * TX rings
		 */
		if (sc->tx_ring_msix == ncpus2) {
			offset = 0;
		} else {
			offset_def = (sc->tx_ring_msix *
			    device_get_unit(sc->dev)) % ncpus2;

			offset = device_getenv_int(sc->dev,
			    "msix.txoff", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->tx_ring_msix != 0) {
				device_printf(sc->dev,
				    "invalid msix.txoff %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}
		ix_conf_tx_msix(sc, 0, &x, offset);
	} else {
		int ring_agg;

		ring_agg = sc->rx_ring_msix;
		if (ring_agg > sc->tx_ring_msix)
			ring_agg = sc->tx_ring_msix;

		if (ring_max == ncpus2) {
			offset = 0;
		} else {
			offset_def = (ring_max * device_get_unit(sc->dev)) %
			    ncpus2;

			offset = device_getenv_int(sc->dev, "msix.off",
			    offset_def);
			if (offset >= ncpus2 || offset % ring_max != 0) {
				device_printf(sc->dev,
				    "invalid msix.off %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}

		for (i = 0; i < ring_agg; ++i) {
			struct ix_tx_ring *txr = &sc->tx_rings[i];
			struct ix_rx_ring *rxr = &sc->rx_rings[i];

			KKASSERT(x < sc->intr_cnt);
			rxr->rx_intr_vec = x;
			ix_setup_msix_eims(sc, x,
			    &rxr->rx_eims, &rxr->rx_eims_val);
			rxr->rx_txr = txr;
			/* NOTE: Leave TX ring's intr_vec negative */

			intr = &sc->intr_data[x++];

			intr->intr_serialize = &rxr->rx_serialize;
			intr->intr_func = ix_msix_rxtx;
			intr->intr_funcarg = rxr;
			intr->intr_use = IX_INTR_USE_RXTX;

			intr->intr_cpuid = i + offset;
			KKASSERT(intr->intr_cpuid < ncpus2);
			txr->tx_intr_cpuid = intr->intr_cpuid;

			ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
			    "%s rxtx%d", device_get_nameunit(sc->dev), i);
			intr->intr_desc = intr->intr_desc0;
		}

		if (ring_agg != ring_max) {
			if (ring_max == sc->tx_ring_msix)
				ix_conf_tx_msix(sc, i, &x, offset);
			else
				ix_conf_rx_msix(sc, i, &x, offset);
		}
	}

	/*
	 * Status MSI-X
	 */
	KKASSERT(x < sc->intr_cnt);
	sc->sts_msix_vec = x;

	intr = &sc->intr_data[x++];

	intr->intr_serialize = &sc->main_serialize;
	intr->intr_func = ix_msix_status;
	intr->intr_funcarg = sc;
	intr->intr_cpuid = 0;
	intr->intr_use = IX_INTR_USE_STATUS;

	ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0), "%s sts",
	    device_get_nameunit(sc->dev));
	intr->intr_desc = intr->intr_desc0;

	KKASSERT(x == sc->intr_cnt);

	error = pci_setup_msix(sc->dev);
	if (error) {
		device_printf(sc->dev, "Setup MSI-X failed\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->intr_cnt; ++i) {
		intr = &sc->intr_data[i];

		error = pci_alloc_msix_vector(sc->dev, i, &intr->intr_rid,
		    intr->intr_cpuid);
		if (error) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d on cpu%d\n", i,
			    intr->intr_cpuid);
			goto back;
		}

		intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &intr->intr_rid, RF_ACTIVE);
		if (intr->intr_res == NULL) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d resource\n", i);
			error = ENOMEM;
			goto back;
		}
	}

	pci_enable_msix(sc->dev);
	sc->intr_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		ix_free_msix(sc, setup);
}
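/*
 * Usage note (added; the exact kenv naming is assumed): the MSI-X layout
 * above can be tuned per device via tunables consumed through
 * device_getenv_int(), e.g. dev.ix.0.msix.enable=0 to fall back to
 * MSI/legacy interrupts, or dev.ix.0.msix.agg_rxtx=1 to force the
 * aggregate RX/TX vector layout.
 */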
static void
ix_free_msix(struct ix_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->intr_cnt > 1);

	for (i = 0; i < sc->intr_cnt; ++i) {
		struct ix_intr_data *intr = &sc->intr_data[i];

		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (intr->intr_rid >= 0)
			pci_release_msix_vector(sc->dev, intr->intr_rid);
	}
	if (setup)
		pci_teardown_msix(sc->dev);

	sc->intr_cnt = 0;
	kfree(sc->intr_data, M_DEVBUF);
	sc->intr_data = NULL;
}

static void
ix_conf_rx_msix(struct ix_softc *sc, int i, int *x0, int offset)
{
	int x = *x0;

	for (; i < sc->rx_ring_msix; ++i) {
		struct ix_rx_ring *rxr = &sc->rx_rings[i];
		struct ix_intr_data *intr;

		KKASSERT(x < sc->intr_cnt);
		rxr->rx_intr_vec = x;
		ix_setup_msix_eims(sc, x, &rxr->rx_eims, &rxr->rx_eims_val);

		intr = &sc->intr_data[x++];

		intr->intr_serialize = &rxr->rx_serialize;
		intr->intr_func = ix_msix_rx;
		intr->intr_funcarg = rxr;
		intr->intr_rate = IX_MSIX_RX_RATE;
		intr->intr_use = IX_INTR_USE_RX;

		intr->intr_cpuid = i + offset;
		KKASSERT(intr->intr_cpuid < ncpus2);

		ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
		    "%s rx%d", device_get_nameunit(sc->dev), i);
		intr->intr_desc = intr->intr_desc0;
	}
	*x0 = x;
}

static void
ix_conf_tx_msix(struct ix_softc *sc, int i, int *x0, int offset)
{
	int x = *x0;

	for (; i < sc->tx_ring_msix; ++i) {
		struct ix_tx_ring *txr = &sc->tx_rings[i];
		struct ix_intr_data *intr;

		KKASSERT(x < sc->intr_cnt);
		txr->tx_intr_vec = x;
		ix_setup_msix_eims(sc, x, &txr->tx_eims, &txr->tx_eims_val);

		intr = &sc->intr_data[x++];

		intr->intr_serialize = &txr->tx_serialize;
		intr->intr_func = ix_msix_tx;
		intr->intr_funcarg = txr;
		intr->intr_rate = IX_MSIX_TX_RATE;
		intr->intr_use = IX_INTR_USE_TX;

		intr->intr_cpuid = i + offset;
		KKASSERT(intr->intr_cpuid < ncpus2);
		txr->tx_intr_cpuid = intr->intr_cpuid;

		ksnprintf(intr->intr_desc0, sizeof(intr->intr_desc0),
		    "%s tx%d", device_get_nameunit(sc->dev), i);
		intr->intr_desc = intr->intr_desc0;
	}
	*x0 = x;
}

static void
ix_msix_rx(void *xrxr)
{
	struct ix_rx_ring *rxr = xrxr;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	ix_rxeof(rxr, -1);
	IXGBE_WRITE_REG(&rxr->rx_sc->hw, rxr->rx_eims, rxr->rx_eims_val);
}

static void
ix_msix_tx(void *xtxr)
{
	struct ix_tx_ring *txr = xtxr;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	ix_txeof(txr, *(txr->tx_hdr));
	if (!ifsq_is_empty(txr->tx_ifsq))
		ifsq_devstart(txr->tx_ifsq);
	IXGBE_WRITE_REG(&txr->tx_sc->hw, txr->tx_eims, txr->tx_eims_val);
}
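/*
 * Added note: each handler above re-arms only its own vector by writing
 * the per-ring EIMS bit after servicing the ring; assuming the usual
 * ixgbe auto-mask configuration set up elsewhere in the driver, that
 * write is what permits the next interrupt.  ix_msix_rxtx() below does
 * the same for the aggregate RX/TX vector, draining RX first and TX
 * opportunistically.
 */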
4721 */ 4722 txr = rxr->rx_txr; 4723 hdr = *(txr->tx_hdr); 4724 if (hdr != txr->tx_next_clean) { 4725 lwkt_serialize_enter(&txr->tx_serialize); 4726 ix_txeof(txr, hdr); 4727 if (!ifsq_is_empty(txr->tx_ifsq)) 4728 ifsq_devstart(txr->tx_ifsq); 4729 lwkt_serialize_exit(&txr->tx_serialize); 4730 } 4731 4732 IXGBE_WRITE_REG(&rxr->rx_sc->hw, rxr->rx_eims, rxr->rx_eims_val); 4733 } 4734 4735 static void 4736 ix_intr_status(struct ix_softc *sc, uint32_t eicr) 4737 { 4738 struct ixgbe_hw *hw = &sc->hw; 4739 4740 /* Link status change */ 4741 if (eicr & IXGBE_EICR_LSC) 4742 ix_handle_link(sc); 4743 4744 if (hw->mac.type != ixgbe_mac_82598EB) { 4745 if (eicr & IXGBE_EICR_ECC) 4746 if_printf(&sc->arpcom.ac_if, "ECC ERROR!! Reboot!!\n"); 4747 else if (eicr & IXGBE_EICR_GPI_SDP1) 4748 ix_handle_msf(sc); 4749 else if (eicr & IXGBE_EICR_GPI_SDP2) 4750 ix_handle_mod(sc); 4751 } 4752 4753 /* Check for fan failure */ 4754 if (hw->device_id == IXGBE_DEV_ID_82598AT && 4755 (eicr & IXGBE_EICR_GPI_SDP1)) 4756 if_printf(&sc->arpcom.ac_if, "FAN FAILURE!! Replace!!\n"); 4757 4758 /* Check for over temp condition */ 4759 if (hw->mac.type == ixgbe_mac_X540 && (eicr & IXGBE_EICR_TS)) { 4760 if_printf(&sc->arpcom.ac_if, "OVER TEMP!! " 4761 "PHY IS SHUT DOWN!! Reboot\n"); 4762 } 4763 } 4764 4765 static void 4766 ix_msix_status(void *xsc) 4767 { 4768 struct ix_softc *sc = xsc; 4769 uint32_t eicr; 4770 4771 ASSERT_SERIALIZED(&sc->main_serialize); 4772 4773 eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR); 4774 ix_intr_status(sc, eicr); 4775 4776 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, sc->intr_mask); 4777 } 4778 4779 static void 4780 ix_setup_msix_eims(const struct ix_softc *sc, int x, 4781 uint32_t *eims, uint32_t *eims_val) 4782 { 4783 if (x < 32) { 4784 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 4785 KASSERT(x < IX_MAX_MSIX_82598, 4786 ("%s: invalid vector %d for 82598", 4787 device_get_nameunit(sc->dev), x)); 4788 *eims = IXGBE_EIMS; 4789 } else { 4790 *eims = IXGBE_EIMS_EX(0); 4791 } 4792 *eims_val = 1 << x; 4793 } else { 4794 KASSERT(x < IX_MAX_MSIX, ("%s: invalid vector %d", 4795 device_get_nameunit(sc->dev), x)); 4796 KASSERT(sc->hw.mac.type != ixgbe_mac_82598EB, 4797 ("%s: invalid vector %d for 82598", 4798 device_get_nameunit(sc->dev), x)); 4799 *eims = IXGBE_EIMS_EX(1); 4800 *eims_val = 1 << (x - 32); 4801 } 4802 } 4803 4804 #ifdef IFPOLL_ENABLE 4805 4806 static void 4807 ix_npoll_status(struct ifnet *ifp) 4808 { 4809 struct ix_softc *sc = ifp->if_softc; 4810 uint32_t eicr; 4811 4812 ASSERT_SERIALIZED(&sc->main_serialize); 4813 4814 eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR); 4815 ix_intr_status(sc, eicr); 4816 } 4817 4818 static void 4819 ix_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 4820 { 4821 struct ix_tx_ring *txr = arg; 4822 4823 ASSERT_SERIALIZED(&txr->tx_serialize); 4824 4825 ix_txeof(txr, *(txr->tx_hdr)); 4826 if (!ifsq_is_empty(txr->tx_ifsq)) 4827 ifsq_devstart(txr->tx_ifsq); 4828 } 4829 4830 static void 4831 ix_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 4832 { 4833 struct ix_rx_ring *rxr = arg; 4834 4835 ASSERT_SERIALIZED(&rxr->rx_serialize); 4836 4837 ix_rxeof(rxr, cycle); 4838 } 4839 4840 static void 4841 ix_npoll(struct ifnet *ifp, struct ifpoll_info *info) 4842 { 4843 struct ix_softc *sc = ifp->if_softc; 4844 int i, txr_cnt, rxr_cnt; 4845 4846 ASSERT_IFNET_SERIALIZED_ALL(ifp); 4847 4848 if (info) { 4849 int off; 4850 4851 info->ifpi_status.status_func = ix_npoll_status; 4852 info->ifpi_status.serializer = &sc->main_serialize; 4853 4854 txr_cnt = ix_get_txring_inuse(sc, 
static void
ix_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct ix_softc *sc = ifp->if_softc;
	int i, txr_cnt, rxr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int off;

		info->ifpi_status.status_func = ix_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = ix_get_txring_inuse(sc, TRUE);
		off = sc->tx_npoll_off;
		for (i = 0; i < txr_cnt; ++i) {
			struct ix_tx_ring *txr = &sc->tx_rings[i];
			int idx = i + off;

			KKASSERT(idx < ncpus2);
			info->ifpi_tx[idx].poll_func = ix_npoll_tx;
			info->ifpi_tx[idx].arg = txr;
			info->ifpi_tx[idx].serializer = &txr->tx_serialize;
			ifsq_set_cpuid(txr->tx_ifsq, idx);
		}

		rxr_cnt = ix_get_rxring_inuse(sc, TRUE);
		off = sc->rx_npoll_off;
		for (i = 0; i < rxr_cnt; ++i) {
			struct ix_rx_ring *rxr = &sc->rx_rings[i];
			int idx = i + off;

			KKASSERT(idx < ncpus2);
			info->ifpi_rx[idx].poll_func = ix_npoll_rx;
			info->ifpi_rx[idx].arg = rxr;
			info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING) {
			if (rxr_cnt == sc->rx_ring_inuse &&
			    txr_cnt == sc->tx_ring_inuse) {
				ix_set_timer_cpuid(sc, TRUE);
				ix_disable_intr(sc);
			} else {
				ix_init(sc);
			}
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct ix_tx_ring *txr = &sc->tx_rings[i];

			ifsq_set_cpuid(txr->tx_ifsq, txr->tx_intr_cpuid);
		}

		if (ifp->if_flags & IFF_RUNNING) {
			txr_cnt = ix_get_txring_inuse(sc, FALSE);
			rxr_cnt = ix_get_rxring_inuse(sc, FALSE);

			if (rxr_cnt == sc->rx_ring_inuse &&
			    txr_cnt == sc->tx_ring_inuse) {
				ix_set_timer_cpuid(sc, FALSE);
				ix_enable_intr(sc);
			} else {
				ix_init(sc);
			}
		}
	}
}

static int
ix_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
{
	struct ix_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->rx_npoll_off;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->rx_npoll_off = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

static int
ix_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
	struct ix_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->tx_npoll_off;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->tx_npoll_off = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif	/* IFPOLL_ENABLE */
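/*
 * Added sketch: standalone model of the EIMS selection implemented by
 * ix_setup_msix_eims() above.  Vectors 0-31 map to the first enable
 * register (bit = vector), vectors 32-63 to the second (bit = vector
 * minus 32); the names below are placeholders.
 */
#if 0
static void
eims_for_vector(int vec, int *reg_sel, uint32_t *bit)
{
	if (vec < 32) {
		*reg_sel = 0;		/* IXGBE_EIMS_EX(0) on 82599/X540 */
		*bit = 1U << vec;
	} else {
		*reg_sel = 1;		/* IXGBE_EIMS_EX(1) */
		*bit = 1U << (vec - 32);
	}
}
#endif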