1 /****************************************************************************** 2 3 Copyright (c) 2006-2013, Myricom Inc. 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Neither the name of the Myricom Inc, nor the names of its 13 contributors may be used to endorse or promote products derived from 14 this software without specific prior written permission. 15 16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 POSSIBILITY OF SUCH DAMAGE. 27 28 $FreeBSD: head/sys/dev/mxge/if_mxge.c 254263 2013-08-12 23:30:01Z scottl $ 29 30 ***************************************************************************/ 31 32 #include "opt_ifpoll.h" 33 #include "opt_inet.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/linker.h> 38 #include <sys/firmware.h> 39 #include <sys/endian.h> 40 #include <sys/in_cksum.h> 41 #include <sys/sockio.h> 42 #include <sys/mbuf.h> 43 #include <sys/malloc.h> 44 #include <sys/kernel.h> 45 #include <sys/module.h> 46 #include <sys/serialize.h> 47 #include <sys/socket.h> 48 #include <sys/sysctl.h> 49 50 #include <net/if.h> 51 #include <net/if_arp.h> 52 #include <net/ifq_var.h> 53 #include <net/ethernet.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 #include <net/if_poll.h> 57 58 #include <net/bpf.h> 59 60 #include <net/if_types.h> 61 #include <net/vlan/if_vlan_var.h> 62 #include <net/zlib.h> 63 #include <net/toeplitz.h> 64 65 #include <netinet/in_systm.h> 66 #include <netinet/in.h> 67 #include <netinet/ip.h> 68 #include <netinet/tcp.h> 69 70 #include <sys/bus.h> 71 #include <sys/rman.h> 72 73 #include <bus/pci/pcireg.h> 74 #include <bus/pci/pcivar.h> 75 #include <bus/pci/pci_private.h> /* XXX for pci_cfg_restore */ 76 77 #include <vm/vm.h> /* for pmap_mapdev() */ 78 #include <vm/pmap.h> 79 80 #if defined(__i386__) || defined(__x86_64__) 81 #include <machine/specialreg.h> 82 #endif 83 84 #include <dev/netif/mxge/mxge_mcp.h> 85 #include <dev/netif/mxge/mcp_gen_header.h> 86 #include <dev/netif/mxge/if_mxge_var.h> 87 88 #define MXGE_RX_SMALL_BUFLEN (MHLEN - MXGEFW_PAD) 89 #define MXGE_HWRSS_KEYLEN 16 90 91 /* Tunable params */ 92 static int mxge_nvidia_ecrc_enable = 1; 93 static int mxge_force_firmware = 0; 94 static int mxge_intr_coal_delay = MXGE_INTR_COAL_DELAY; 95 static int mxge_deassert_wait = 1; 96 static int mxge_flow_control = 1; 97 static int mxge_ticks; 98 static int mxge_num_slices = 0; 99 static int mxge_always_promisc = 0; 100 static int mxge_throttle = 0; 101 static int mxge_msi_enable = 1; 102 static int mxge_msix_enable = 1; 103 static int mxge_multi_tx = 1; 104 /* 105 * Don't use RSS by default, its just too slow 106 */ 107 static int mxge_use_rss = 0; 108 109 static const char *mxge_fw_unaligned = "mxge_ethp_z8e"; 110 static const char *mxge_fw_aligned = "mxge_eth_z8e"; 111 static const char *mxge_fw_rss_aligned = "mxge_rss_eth_z8e"; 112 static const char *mxge_fw_rss_unaligned = "mxge_rss_ethp_z8e"; 113 114 TUNABLE_INT("hw.mxge.num_slices", &mxge_num_slices); 115 TUNABLE_INT("hw.mxge.flow_control_enabled", &mxge_flow_control); 116 TUNABLE_INT("hw.mxge.intr_coal_delay", &mxge_intr_coal_delay); 117 TUNABLE_INT("hw.mxge.nvidia_ecrc_enable", &mxge_nvidia_ecrc_enable); 118 TUNABLE_INT("hw.mxge.force_firmware", &mxge_force_firmware); 119 TUNABLE_INT("hw.mxge.deassert_wait", &mxge_deassert_wait); 120 TUNABLE_INT("hw.mxge.ticks", &mxge_ticks); 121 TUNABLE_INT("hw.mxge.always_promisc", &mxge_always_promisc); 122 TUNABLE_INT("hw.mxge.throttle", &mxge_throttle); 123 TUNABLE_INT("hw.mxge.multi_tx", &mxge_multi_tx); 124 TUNABLE_INT("hw.mxge.use_rss", &mxge_use_rss); 125 TUNABLE_INT("hw.mxge.msi.enable", &mxge_msi_enable); 126 TUNABLE_INT("hw.mxge.msix.enable", &mxge_msix_enable); 127 128 static int mxge_probe(device_t dev); 129 static int mxge_attach(device_t dev); 130 static int mxge_detach(device_t dev); 131 static int mxge_shutdown(device_t dev); 132 133 static int mxge_alloc_intr(struct mxge_softc *sc); 134 static void mxge_free_intr(struct mxge_softc *sc); 135 static int mxge_setup_intr(struct mxge_softc *sc); 136 static void mxge_teardown_intr(struct mxge_softc *sc, int cnt); 137 138 static device_method_t mxge_methods[] = { 139 /* Device interface */ 140 DEVMETHOD(device_probe, mxge_probe), 141 DEVMETHOD(device_attach, mxge_attach), 142 DEVMETHOD(device_detach, mxge_detach), 143 DEVMETHOD(device_shutdown, mxge_shutdown), 144 DEVMETHOD_END 145 }; 146 147 static driver_t mxge_driver = { 148 "mxge", 149 mxge_methods, 150 sizeof(mxge_softc_t), 151 }; 152 153 static devclass_t mxge_devclass; 154 155 /* Declare ourselves to be a child of the PCI bus.*/ 156 DRIVER_MODULE(mxge, pci, mxge_driver, mxge_devclass, NULL, NULL); 157 MODULE_DEPEND(mxge, firmware, 1, 1, 1); 158 MODULE_DEPEND(mxge, zlib, 1, 1, 1); 159 160 static int mxge_load_firmware(mxge_softc_t *sc, int adopt); 161 static int mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data); 162 static void mxge_close(mxge_softc_t *sc, int down); 163 static int mxge_open(mxge_softc_t *sc); 164 static void mxge_tick(void *arg); 165 static void mxge_watchdog_reset(mxge_softc_t *sc); 166 static void mxge_warn_stuck(mxge_softc_t *sc, mxge_tx_ring_t *tx, int slice); 167 168 static int 169 mxge_probe(device_t dev) 170 { 171 if (pci_get_vendor(dev) == MXGE_PCI_VENDOR_MYRICOM && 172 (pci_get_device(dev) == MXGE_PCI_DEVICE_Z8E || 173 pci_get_device(dev) == MXGE_PCI_DEVICE_Z8E_9)) { 174 int rev = pci_get_revid(dev); 175 176 switch (rev) { 177 case MXGE_PCI_REV_Z8E: 178 device_set_desc(dev, "Myri10G-PCIE-8A"); 179 break; 180 case MXGE_PCI_REV_Z8ES: 181 device_set_desc(dev, "Myri10G-PCIE-8B"); 182 break; 183 default: 184 device_set_desc(dev, "Myri10G-PCIE-8??"); 185 device_printf(dev, "Unrecognized rev %d NIC\n", rev); 186 break; 187 } 188 return 0; 189 } 190 return ENXIO; 191 } 192 193 static void 194 mxge_enable_wc(mxge_softc_t *sc) 195 { 196 #if defined(__i386__) || defined(__x86_64__) 197 vm_offset_t len; 198 199 sc->wc = 1; 200 len = rman_get_size(sc->mem_res); 201 pmap_change_attr((vm_offset_t) sc->sram, len / PAGE_SIZE, 202 PAT_WRITE_COMBINING); 203 #endif 204 } 205 206 static int 207 mxge_dma_alloc(mxge_softc_t *sc, bus_dmamem_t *dma, size_t bytes, 208 bus_size_t alignment) 209 { 210 bus_size_t boundary; 211 int err; 212 213 if (bytes > 4096 && alignment == 4096) 214 boundary = 0; 215 else 216 boundary = 4096; 217 218 err = bus_dmamem_coherent(sc->parent_dmat, alignment, boundary, 219 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, bytes, 220 BUS_DMA_WAITOK | BUS_DMA_ZERO, dma); 221 if (err != 0) { 222 device_printf(sc->dev, "bus_dmamem_coherent failed: %d\n", err); 223 return err; 224 } 225 return 0; 226 } 227 228 static void 229 mxge_dma_free(bus_dmamem_t *dma) 230 { 231 bus_dmamap_unload(dma->dmem_tag, dma->dmem_map); 232 bus_dmamem_free(dma->dmem_tag, dma->dmem_addr, dma->dmem_map); 233 bus_dma_tag_destroy(dma->dmem_tag); 234 } 235 236 /* 237 * The eeprom strings on the lanaiX have the format 238 * SN=x\0 239 * MAC=x:x:x:x:x:x\0 240 * PC=text\0 241 */ 242 static int 243 mxge_parse_strings(mxge_softc_t *sc) 244 { 245 const char *ptr; 246 int i, found_mac, found_sn2; 247 char *endptr; 248 249 ptr = sc->eeprom_strings; 250 found_mac = 0; 251 found_sn2 = 0; 252 while (*ptr != '\0') { 253 if (strncmp(ptr, "MAC=", 4) == 0) { 254 ptr += 4; 255 for (i = 0;;) { 256 sc->mac_addr[i] = strtoul(ptr, &endptr, 16); 257 if (endptr - ptr != 2) 258 goto abort; 259 ptr = endptr; 260 if (++i == 6) 261 break; 262 if (*ptr++ != ':') 263 goto abort; 264 } 265 found_mac = 1; 266 } else if (strncmp(ptr, "PC=", 3) == 0) { 267 ptr += 3; 268 strlcpy(sc->product_code_string, ptr, 269 sizeof(sc->product_code_string)); 270 } else if (!found_sn2 && (strncmp(ptr, "SN=", 3) == 0)) { 271 ptr += 3; 272 strlcpy(sc->serial_number_string, ptr, 273 sizeof(sc->serial_number_string)); 274 } else if (strncmp(ptr, "SN2=", 4) == 0) { 275 /* SN2 takes precedence over SN */ 276 ptr += 4; 277 found_sn2 = 1; 278 strlcpy(sc->serial_number_string, ptr, 279 sizeof(sc->serial_number_string)); 280 } 281 while (*ptr++ != '\0') {} 282 } 283 284 if (found_mac) 285 return 0; 286 287 abort: 288 device_printf(sc->dev, "failed to parse eeprom_strings\n"); 289 return ENXIO; 290 } 291 292 #if defined(__i386__) || defined(__x86_64__) 293 294 static void 295 mxge_enable_nvidia_ecrc(mxge_softc_t *sc) 296 { 297 uint32_t val; 298 unsigned long base, off; 299 char *va, *cfgptr; 300 device_t pdev, mcp55; 301 uint16_t vendor_id, device_id, word; 302 uintptr_t bus, slot, func, ivend, idev; 303 uint32_t *ptr32; 304 305 if (!mxge_nvidia_ecrc_enable) 306 return; 307 308 pdev = device_get_parent(device_get_parent(sc->dev)); 309 if (pdev == NULL) { 310 device_printf(sc->dev, "could not find parent?\n"); 311 return; 312 } 313 vendor_id = pci_read_config(pdev, PCIR_VENDOR, 2); 314 device_id = pci_read_config(pdev, PCIR_DEVICE, 2); 315 316 if (vendor_id != 0x10de) 317 return; 318 319 base = 0; 320 321 if (device_id == 0x005d) { 322 /* ck804, base address is magic */ 323 base = 0xe0000000UL; 324 } else if (device_id >= 0x0374 && device_id <= 0x378) { 325 /* mcp55, base address stored in chipset */ 326 mcp55 = pci_find_bsf(0, 0, 0); 327 if (mcp55 && 328 0x10de == pci_read_config(mcp55, PCIR_VENDOR, 2) && 329 0x0369 == pci_read_config(mcp55, PCIR_DEVICE, 2)) { 330 word = pci_read_config(mcp55, 0x90, 2); 331 base = ((unsigned long)word & 0x7ffeU) << 25; 332 } 333 } 334 if (!base) 335 return; 336 337 /* 338 * XXXX 339 * Test below is commented because it is believed that doing 340 * config read/write beyond 0xff will access the config space 341 * for the next larger function. Uncomment this and remove 342 * the hacky pmap_mapdev() way of accessing config space when 343 * DragonFly grows support for extended pcie config space access. 344 */ 345 #if 0 346 /* 347 * See if we can, by some miracle, access the extended 348 * config space 349 */ 350 val = pci_read_config(pdev, 0x178, 4); 351 if (val != 0xffffffff) { 352 val |= 0x40; 353 pci_write_config(pdev, 0x178, val, 4); 354 return; 355 } 356 #endif 357 /* 358 * Rather than using normal pci config space writes, we must 359 * map the Nvidia config space ourselves. This is because on 360 * opteron/nvidia class machine the 0xe000000 mapping is 361 * handled by the nvidia chipset, that means the internal PCI 362 * device (the on-chip northbridge), or the amd-8131 bridge 363 * and things behind them are not visible by this method. 364 */ 365 366 BUS_READ_IVAR(device_get_parent(pdev), pdev, 367 PCI_IVAR_BUS, &bus); 368 BUS_READ_IVAR(device_get_parent(pdev), pdev, 369 PCI_IVAR_SLOT, &slot); 370 BUS_READ_IVAR(device_get_parent(pdev), pdev, 371 PCI_IVAR_FUNCTION, &func); 372 BUS_READ_IVAR(device_get_parent(pdev), pdev, 373 PCI_IVAR_VENDOR, &ivend); 374 BUS_READ_IVAR(device_get_parent(pdev), pdev, 375 PCI_IVAR_DEVICE, &idev); 376 377 off = base + 0x00100000UL * (unsigned long)bus + 378 0x00001000UL * (unsigned long)(func + 8 * slot); 379 380 /* map it into the kernel */ 381 va = pmap_mapdev(trunc_page((vm_paddr_t)off), PAGE_SIZE); 382 if (va == NULL) { 383 device_printf(sc->dev, "pmap_kenter_temporary didn't\n"); 384 return; 385 } 386 /* get a pointer to the config space mapped into the kernel */ 387 cfgptr = va + (off & PAGE_MASK); 388 389 /* make sure that we can really access it */ 390 vendor_id = *(uint16_t *)(cfgptr + PCIR_VENDOR); 391 device_id = *(uint16_t *)(cfgptr + PCIR_DEVICE); 392 if (!(vendor_id == ivend && device_id == idev)) { 393 device_printf(sc->dev, "mapping failed: 0x%x:0x%x\n", 394 vendor_id, device_id); 395 pmap_unmapdev((vm_offset_t)va, PAGE_SIZE); 396 return; 397 } 398 399 ptr32 = (uint32_t*)(cfgptr + 0x178); 400 val = *ptr32; 401 402 if (val == 0xffffffff) { 403 device_printf(sc->dev, "extended mapping failed\n"); 404 pmap_unmapdev((vm_offset_t)va, PAGE_SIZE); 405 return; 406 } 407 *ptr32 = val | 0x40; 408 pmap_unmapdev((vm_offset_t)va, PAGE_SIZE); 409 if (bootverbose) { 410 device_printf(sc->dev, "Enabled ECRC on upstream " 411 "Nvidia bridge at %d:%d:%d\n", 412 (int)bus, (int)slot, (int)func); 413 } 414 } 415 416 #else /* __i386__ || __x86_64__ */ 417 418 static void 419 mxge_enable_nvidia_ecrc(mxge_softc_t *sc) 420 { 421 device_printf(sc->dev, "Nforce 4 chipset on non-x86/x86_64!?!?!\n"); 422 } 423 424 #endif 425 426 static int 427 mxge_dma_test(mxge_softc_t *sc, int test_type) 428 { 429 mxge_cmd_t cmd; 430 bus_addr_t dmatest_bus = sc->dmabench_dma.dmem_busaddr; 431 int status; 432 uint32_t len; 433 const char *test = " "; 434 435 /* 436 * Run a small DMA test. 437 * The magic multipliers to the length tell the firmware 438 * to do DMA read, write, or read+write tests. The 439 * results are returned in cmd.data0. The upper 16 440 * bits of the return is the number of transfers completed. 441 * The lower 16 bits is the time in 0.5us ticks that the 442 * transfers took to complete. 443 */ 444 445 len = sc->tx_boundary; 446 447 cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); 448 cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); 449 cmd.data2 = len * 0x10000; 450 status = mxge_send_cmd(sc, test_type, &cmd); 451 if (status != 0) { 452 test = "read"; 453 goto abort; 454 } 455 sc->read_dma = ((cmd.data0>>16) * len * 2) / (cmd.data0 & 0xffff); 456 457 cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); 458 cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); 459 cmd.data2 = len * 0x1; 460 status = mxge_send_cmd(sc, test_type, &cmd); 461 if (status != 0) { 462 test = "write"; 463 goto abort; 464 } 465 sc->write_dma = ((cmd.data0>>16) * len * 2) / (cmd.data0 & 0xffff); 466 467 cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); 468 cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); 469 cmd.data2 = len * 0x10001; 470 status = mxge_send_cmd(sc, test_type, &cmd); 471 if (status != 0) { 472 test = "read/write"; 473 goto abort; 474 } 475 sc->read_write_dma = ((cmd.data0>>16) * len * 2 * 2) / 476 (cmd.data0 & 0xffff); 477 478 abort: 479 if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST) { 480 device_printf(sc->dev, "DMA %s benchmark failed: %d\n", 481 test, status); 482 } 483 return status; 484 } 485 486 /* 487 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput 488 * when the PCI-E Completion packets are aligned on an 8-byte 489 * boundary. Some PCI-E chip sets always align Completion packets; on 490 * the ones that do not, the alignment can be enforced by enabling 491 * ECRC generation (if supported). 492 * 493 * When PCI-E Completion packets are not aligned, it is actually more 494 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB. 495 * 496 * If the driver can neither enable ECRC nor verify that it has 497 * already been enabled, then it must use a firmware image which works 498 * around unaligned completion packets (ethp_z8e.dat), and it should 499 * also ensure that it never gives the device a Read-DMA which is 500 * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is 501 * enabled, then the driver should use the aligned (eth_z8e.dat) 502 * firmware image, and set tx_boundary to 4KB. 503 */ 504 static int 505 mxge_firmware_probe(mxge_softc_t *sc) 506 { 507 device_t dev = sc->dev; 508 int reg, status; 509 uint16_t pectl; 510 511 sc->tx_boundary = 4096; 512 513 /* 514 * Verify the max read request size was set to 4KB 515 * before trying the test with 4KB. 516 */ 517 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 518 pectl = pci_read_config(dev, reg + 0x8, 2); 519 if ((pectl & (5 << 12)) != (5 << 12)) { 520 device_printf(dev, "Max Read Req. size != 4k (0x%x)\n", 521 pectl); 522 sc->tx_boundary = 2048; 523 } 524 } 525 526 /* 527 * Load the optimized firmware (which assumes aligned PCIe 528 * completions) in order to see if it works on this host. 529 */ 530 sc->fw_name = mxge_fw_aligned; 531 status = mxge_load_firmware(sc, 1); 532 if (status != 0) 533 return status; 534 535 /* 536 * Enable ECRC if possible 537 */ 538 mxge_enable_nvidia_ecrc(sc); 539 540 /* 541 * Run a DMA test which watches for unaligned completions and 542 * aborts on the first one seen. Not required on Z8ES or newer. 543 */ 544 if (pci_get_revid(sc->dev) >= MXGE_PCI_REV_Z8ES) 545 return 0; 546 547 status = mxge_dma_test(sc, MXGEFW_CMD_UNALIGNED_TEST); 548 if (status == 0) 549 return 0; /* keep the aligned firmware */ 550 551 if (status != E2BIG) 552 device_printf(dev, "DMA test failed: %d\n", status); 553 if (status == ENOSYS) { 554 device_printf(dev, "Falling back to ethp! " 555 "Please install up to date fw\n"); 556 } 557 return status; 558 } 559 560 static int 561 mxge_select_firmware(mxge_softc_t *sc) 562 { 563 int aligned = 0; 564 int force_firmware = mxge_force_firmware; 565 566 if (sc->throttle) 567 force_firmware = sc->throttle; 568 569 if (force_firmware != 0) { 570 if (force_firmware == 1) 571 aligned = 1; 572 else 573 aligned = 0; 574 if (bootverbose) { 575 device_printf(sc->dev, 576 "Assuming %s completions (forced)\n", 577 aligned ? "aligned" : "unaligned"); 578 } 579 goto abort; 580 } 581 582 /* 583 * If the PCIe link width is 4 or less, we can use the aligned 584 * firmware and skip any checks 585 */ 586 if (sc->link_width != 0 && sc->link_width <= 4) { 587 device_printf(sc->dev, "PCIe x%d Link, " 588 "expect reduced performance\n", sc->link_width); 589 aligned = 1; 590 goto abort; 591 } 592 593 if (mxge_firmware_probe(sc) == 0) 594 return 0; 595 596 abort: 597 if (aligned) { 598 sc->fw_name = mxge_fw_aligned; 599 sc->tx_boundary = 4096; 600 } else { 601 sc->fw_name = mxge_fw_unaligned; 602 sc->tx_boundary = 2048; 603 } 604 return mxge_load_firmware(sc, 0); 605 } 606 607 static int 608 mxge_validate_firmware(mxge_softc_t *sc, const mcp_gen_header_t *hdr) 609 { 610 if (be32toh(hdr->mcp_type) != MCP_TYPE_ETH) { 611 if_printf(sc->ifp, "Bad firmware type: 0x%x\n", 612 be32toh(hdr->mcp_type)); 613 return EIO; 614 } 615 616 /* Save firmware version for sysctl */ 617 strlcpy(sc->fw_version, hdr->version, sizeof(sc->fw_version)); 618 if (bootverbose) 619 if_printf(sc->ifp, "firmware id: %s\n", hdr->version); 620 621 ksscanf(sc->fw_version, "%d.%d.%d", &sc->fw_ver_major, 622 &sc->fw_ver_minor, &sc->fw_ver_tiny); 623 624 if (!(sc->fw_ver_major == MXGEFW_VERSION_MAJOR && 625 sc->fw_ver_minor == MXGEFW_VERSION_MINOR)) { 626 if_printf(sc->ifp, "Found firmware version %s\n", 627 sc->fw_version); 628 if_printf(sc->ifp, "Driver needs %d.%d\n", 629 MXGEFW_VERSION_MAJOR, MXGEFW_VERSION_MINOR); 630 return EINVAL; 631 } 632 return 0; 633 } 634 635 static void * 636 z_alloc(void *nil, u_int items, u_int size) 637 { 638 return kmalloc(items * size, M_TEMP, M_WAITOK); 639 } 640 641 static void 642 z_free(void *nil, void *ptr) 643 { 644 kfree(ptr, M_TEMP); 645 } 646 647 static int 648 mxge_load_firmware_helper(mxge_softc_t *sc, uint32_t *limit) 649 { 650 z_stream zs; 651 char *inflate_buffer; 652 const struct firmware *fw; 653 const mcp_gen_header_t *hdr; 654 unsigned hdr_offset; 655 int status; 656 unsigned int i; 657 char dummy; 658 size_t fw_len; 659 660 fw = firmware_get(sc->fw_name); 661 if (fw == NULL) { 662 if_printf(sc->ifp, "Could not find firmware image %s\n", 663 sc->fw_name); 664 return ENOENT; 665 } 666 667 /* Setup zlib and decompress f/w */ 668 bzero(&zs, sizeof(zs)); 669 zs.zalloc = z_alloc; 670 zs.zfree = z_free; 671 status = inflateInit(&zs); 672 if (status != Z_OK) { 673 status = EIO; 674 goto abort_with_fw; 675 } 676 677 /* 678 * The uncompressed size is stored as the firmware version, 679 * which would otherwise go unused 680 */ 681 fw_len = (size_t)fw->version; 682 inflate_buffer = kmalloc(fw_len, M_TEMP, M_WAITOK); 683 zs.avail_in = fw->datasize; 684 zs.next_in = __DECONST(char *, fw->data); 685 zs.avail_out = fw_len; 686 zs.next_out = inflate_buffer; 687 status = inflate(&zs, Z_FINISH); 688 if (status != Z_STREAM_END) { 689 if_printf(sc->ifp, "zlib %d\n", status); 690 status = EIO; 691 goto abort_with_buffer; 692 } 693 694 /* Check id */ 695 hdr_offset = 696 htobe32(*(const uint32_t *)(inflate_buffer + MCP_HEADER_PTR_OFFSET)); 697 if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw_len) { 698 if_printf(sc->ifp, "Bad firmware file"); 699 status = EIO; 700 goto abort_with_buffer; 701 } 702 hdr = (const void*)(inflate_buffer + hdr_offset); 703 704 status = mxge_validate_firmware(sc, hdr); 705 if (status != 0) 706 goto abort_with_buffer; 707 708 /* Copy the inflated firmware to NIC SRAM. */ 709 for (i = 0; i < fw_len; i += 256) { 710 mxge_pio_copy(sc->sram + MXGE_FW_OFFSET + i, inflate_buffer + i, 711 min(256U, (unsigned)(fw_len - i))); 712 wmb(); 713 dummy = *sc->sram; 714 wmb(); 715 } 716 717 *limit = fw_len; 718 status = 0; 719 abort_with_buffer: 720 kfree(inflate_buffer, M_TEMP); 721 inflateEnd(&zs); 722 abort_with_fw: 723 firmware_put(fw, FIRMWARE_UNLOAD); 724 return status; 725 } 726 727 /* 728 * Enable or disable periodic RDMAs from the host to make certain 729 * chipsets resend dropped PCIe messages 730 */ 731 static void 732 mxge_dummy_rdma(mxge_softc_t *sc, int enable) 733 { 734 char buf_bytes[72]; 735 volatile uint32_t *confirm; 736 volatile char *submit; 737 uint32_t *buf, dma_low, dma_high; 738 int i; 739 740 buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL); 741 742 /* Clear confirmation addr */ 743 confirm = (volatile uint32_t *)sc->cmd; 744 *confirm = 0; 745 wmb(); 746 747 /* 748 * Send an rdma command to the PCIe engine, and wait for the 749 * response in the confirmation address. The firmware should 750 * write a -1 there to indicate it is alive and well 751 */ 752 dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.dmem_busaddr); 753 dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.dmem_busaddr); 754 buf[0] = htobe32(dma_high); /* confirm addr MSW */ 755 buf[1] = htobe32(dma_low); /* confirm addr LSW */ 756 buf[2] = htobe32(0xffffffff); /* confirm data */ 757 dma_low = MXGE_LOWPART_TO_U32(sc->zeropad_dma.dmem_busaddr); 758 dma_high = MXGE_HIGHPART_TO_U32(sc->zeropad_dma.dmem_busaddr); 759 buf[3] = htobe32(dma_high); /* dummy addr MSW */ 760 buf[4] = htobe32(dma_low); /* dummy addr LSW */ 761 buf[5] = htobe32(enable); /* enable? */ 762 763 submit = (volatile char *)(sc->sram + MXGEFW_BOOT_DUMMY_RDMA); 764 765 mxge_pio_copy(submit, buf, 64); 766 wmb(); 767 DELAY(1000); 768 wmb(); 769 i = 0; 770 while (*confirm != 0xffffffff && i < 20) { 771 DELAY(1000); 772 i++; 773 } 774 if (*confirm != 0xffffffff) { 775 if_printf(sc->ifp, "dummy rdma %s failed (%p = 0x%x)", 776 (enable ? "enable" : "disable"), confirm, *confirm); 777 } 778 } 779 780 static int 781 mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data) 782 { 783 mcp_cmd_t *buf; 784 char buf_bytes[sizeof(*buf) + 8]; 785 volatile mcp_cmd_response_t *response = sc->cmd; 786 volatile char *cmd_addr = sc->sram + MXGEFW_ETH_CMD; 787 uint32_t dma_low, dma_high; 788 int err, sleep_total = 0; 789 790 /* Ensure buf is aligned to 8 bytes */ 791 buf = (mcp_cmd_t *)((unsigned long)(buf_bytes + 7) & ~7UL); 792 793 buf->data0 = htobe32(data->data0); 794 buf->data1 = htobe32(data->data1); 795 buf->data2 = htobe32(data->data2); 796 buf->cmd = htobe32(cmd); 797 dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.dmem_busaddr); 798 dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.dmem_busaddr); 799 800 buf->response_addr.low = htobe32(dma_low); 801 buf->response_addr.high = htobe32(dma_high); 802 803 response->result = 0xffffffff; 804 wmb(); 805 mxge_pio_copy((volatile void *)cmd_addr, buf, sizeof (*buf)); 806 807 /* 808 * Wait up to 20ms 809 */ 810 err = EAGAIN; 811 for (sleep_total = 0; sleep_total < 20; sleep_total++) { 812 wmb(); 813 switch (be32toh(response->result)) { 814 case 0: 815 data->data0 = be32toh(response->data); 816 err = 0; 817 break; 818 case 0xffffffff: 819 DELAY(1000); 820 break; 821 case MXGEFW_CMD_UNKNOWN: 822 err = ENOSYS; 823 break; 824 case MXGEFW_CMD_ERROR_UNALIGNED: 825 err = E2BIG; 826 break; 827 case MXGEFW_CMD_ERROR_BUSY: 828 err = EBUSY; 829 break; 830 case MXGEFW_CMD_ERROR_I2C_ABSENT: 831 err = ENXIO; 832 break; 833 default: 834 if_printf(sc->ifp, "command %d failed, result = %d\n", 835 cmd, be32toh(response->result)); 836 err = ENXIO; 837 break; 838 } 839 if (err != EAGAIN) 840 break; 841 } 842 if (err == EAGAIN) { 843 if_printf(sc->ifp, "command %d timed out result = %d\n", 844 cmd, be32toh(response->result)); 845 } 846 return err; 847 } 848 849 static int 850 mxge_adopt_running_firmware(mxge_softc_t *sc) 851 { 852 struct mcp_gen_header *hdr; 853 const size_t bytes = sizeof(struct mcp_gen_header); 854 size_t hdr_offset; 855 int status; 856 857 /* 858 * Find running firmware header 859 */ 860 hdr_offset = 861 htobe32(*(volatile uint32_t *)(sc->sram + MCP_HEADER_PTR_OFFSET)); 862 863 if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > sc->sram_size) { 864 if_printf(sc->ifp, "Running firmware has bad header offset " 865 "(%zu)\n", hdr_offset); 866 return EIO; 867 } 868 869 /* 870 * Copy header of running firmware from SRAM to host memory to 871 * validate firmware 872 */ 873 hdr = kmalloc(bytes, M_DEVBUF, M_WAITOK); 874 bus_space_read_region_1(rman_get_bustag(sc->mem_res), 875 rman_get_bushandle(sc->mem_res), hdr_offset, (char *)hdr, bytes); 876 status = mxge_validate_firmware(sc, hdr); 877 kfree(hdr, M_DEVBUF); 878 879 /* 880 * Check to see if adopted firmware has bug where adopting 881 * it will cause broadcasts to be filtered unless the NIC 882 * is kept in ALLMULTI mode 883 */ 884 if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 && 885 sc->fw_ver_tiny >= 4 && sc->fw_ver_tiny <= 11) { 886 sc->adopted_rx_filter_bug = 1; 887 if_printf(sc->ifp, "Adopting fw %d.%d.%d: " 888 "working around rx filter bug\n", 889 sc->fw_ver_major, sc->fw_ver_minor, sc->fw_ver_tiny); 890 } 891 892 return status; 893 } 894 895 static int 896 mxge_load_firmware(mxge_softc_t *sc, int adopt) 897 { 898 volatile uint32_t *confirm; 899 volatile char *submit; 900 char buf_bytes[72]; 901 uint32_t *buf, size, dma_low, dma_high; 902 int status, i; 903 904 buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL); 905 906 size = sc->sram_size; 907 status = mxge_load_firmware_helper(sc, &size); 908 if (status) { 909 if (!adopt) 910 return status; 911 912 /* 913 * Try to use the currently running firmware, if 914 * it is new enough 915 */ 916 status = mxge_adopt_running_firmware(sc); 917 if (status) { 918 if_printf(sc->ifp, 919 "failed to adopt running firmware\n"); 920 return status; 921 } 922 if_printf(sc->ifp, "Successfully adopted running firmware\n"); 923 924 if (sc->tx_boundary == 4096) { 925 if_printf(sc->ifp, 926 "Using firmware currently running on NIC. " 927 "For optimal\n"); 928 if_printf(sc->ifp, "performance consider loading " 929 "optimized firmware\n"); 930 } 931 sc->fw_name = mxge_fw_unaligned; 932 sc->tx_boundary = 2048; 933 return 0; 934 } 935 936 /* Clear confirmation addr */ 937 confirm = (volatile uint32_t *)sc->cmd; 938 *confirm = 0; 939 wmb(); 940 941 /* 942 * Send a reload command to the bootstrap MCP, and wait for the 943 * response in the confirmation address. The firmware should 944 * write a -1 there to indicate it is alive and well 945 */ 946 947 dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.dmem_busaddr); 948 dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.dmem_busaddr); 949 950 buf[0] = htobe32(dma_high); /* confirm addr MSW */ 951 buf[1] = htobe32(dma_low); /* confirm addr LSW */ 952 buf[2] = htobe32(0xffffffff); /* confirm data */ 953 954 /* 955 * FIX: All newest firmware should un-protect the bottom of 956 * the sram before handoff. However, the very first interfaces 957 * do not. Therefore the handoff copy must skip the first 8 bytes 958 */ 959 /* where the code starts*/ 960 buf[3] = htobe32(MXGE_FW_OFFSET + 8); 961 buf[4] = htobe32(size - 8); /* length of code */ 962 buf[5] = htobe32(8); /* where to copy to */ 963 buf[6] = htobe32(0); /* where to jump to */ 964 965 submit = (volatile char *)(sc->sram + MXGEFW_BOOT_HANDOFF); 966 mxge_pio_copy(submit, buf, 64); 967 wmb(); 968 DELAY(1000); 969 wmb(); 970 i = 0; 971 while (*confirm != 0xffffffff && i < 20) { 972 DELAY(1000*10); 973 i++; 974 } 975 if (*confirm != 0xffffffff) { 976 if_printf(sc->ifp,"handoff failed (%p = 0x%x)", 977 confirm, *confirm); 978 return ENXIO; 979 } 980 return 0; 981 } 982 983 static int 984 mxge_update_mac_address(mxge_softc_t *sc) 985 { 986 mxge_cmd_t cmd; 987 uint8_t *addr = sc->mac_addr; 988 989 cmd.data0 = (addr[0] << 24) | (addr[1] << 16) | 990 (addr[2] << 8) | addr[3]; 991 cmd.data1 = (addr[4] << 8) | (addr[5]); 992 return mxge_send_cmd(sc, MXGEFW_SET_MAC_ADDRESS, &cmd); 993 } 994 995 static int 996 mxge_change_pause(mxge_softc_t *sc, int pause) 997 { 998 mxge_cmd_t cmd; 999 int status; 1000 1001 if (pause) 1002 status = mxge_send_cmd(sc, MXGEFW_ENABLE_FLOW_CONTROL, &cmd); 1003 else 1004 status = mxge_send_cmd(sc, MXGEFW_DISABLE_FLOW_CONTROL, &cmd); 1005 if (status) { 1006 if_printf(sc->ifp, "Failed to set flow control mode\n"); 1007 return ENXIO; 1008 } 1009 sc->pause = pause; 1010 return 0; 1011 } 1012 1013 static void 1014 mxge_change_promisc(mxge_softc_t *sc, int promisc) 1015 { 1016 mxge_cmd_t cmd; 1017 int status; 1018 1019 if (mxge_always_promisc) 1020 promisc = 1; 1021 1022 if (promisc) 1023 status = mxge_send_cmd(sc, MXGEFW_ENABLE_PROMISC, &cmd); 1024 else 1025 status = mxge_send_cmd(sc, MXGEFW_DISABLE_PROMISC, &cmd); 1026 if (status) 1027 if_printf(sc->ifp, "Failed to set promisc mode\n"); 1028 } 1029 1030 static void 1031 mxge_set_multicast_list(mxge_softc_t *sc) 1032 { 1033 mxge_cmd_t cmd; 1034 struct ifmultiaddr *ifma; 1035 struct ifnet *ifp = sc->ifp; 1036 int err; 1037 1038 /* This firmware is known to not support multicast */ 1039 if (!sc->fw_multicast_support) 1040 return; 1041 1042 /* Disable multicast filtering while we play with the lists*/ 1043 err = mxge_send_cmd(sc, MXGEFW_ENABLE_ALLMULTI, &cmd); 1044 if (err != 0) { 1045 if_printf(ifp, "Failed MXGEFW_ENABLE_ALLMULTI, " 1046 "error status: %d\n", err); 1047 return; 1048 } 1049 1050 if (sc->adopted_rx_filter_bug) 1051 return; 1052 1053 if (ifp->if_flags & IFF_ALLMULTI) { 1054 /* Request to disable multicast filtering, so quit here */ 1055 return; 1056 } 1057 1058 /* Flush all the filters */ 1059 err = mxge_send_cmd(sc, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, &cmd); 1060 if (err != 0) { 1061 if_printf(ifp, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, " 1062 "error status: %d\n", err); 1063 return; 1064 } 1065 1066 /* 1067 * Walk the multicast list, and add each address 1068 */ 1069 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1070 if (ifma->ifma_addr->sa_family != AF_LINK) 1071 continue; 1072 1073 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1074 &cmd.data0, 4); 1075 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr) + 4, 1076 &cmd.data1, 2); 1077 cmd.data0 = htonl(cmd.data0); 1078 cmd.data1 = htonl(cmd.data1); 1079 err = mxge_send_cmd(sc, MXGEFW_JOIN_MULTICAST_GROUP, &cmd); 1080 if (err != 0) { 1081 if_printf(ifp, "Failed MXGEFW_JOIN_MULTICAST_GROUP, " 1082 "error status: %d\n", err); 1083 /* Abort, leaving multicast filtering off */ 1084 return; 1085 } 1086 } 1087 1088 /* Enable multicast filtering */ 1089 err = mxge_send_cmd(sc, MXGEFW_DISABLE_ALLMULTI, &cmd); 1090 if (err != 0) { 1091 if_printf(ifp, "Failed MXGEFW_DISABLE_ALLMULTI, " 1092 "error status: %d\n", err); 1093 } 1094 } 1095 1096 #if 0 1097 static int 1098 mxge_max_mtu(mxge_softc_t *sc) 1099 { 1100 mxge_cmd_t cmd; 1101 int status; 1102 1103 if (MJUMPAGESIZE - MXGEFW_PAD > MXGEFW_MAX_MTU) 1104 return MXGEFW_MAX_MTU - MXGEFW_PAD; 1105 1106 /* try to set nbufs to see if it we can 1107 use virtually contiguous jumbos */ 1108 cmd.data0 = 0; 1109 status = mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS, 1110 &cmd); 1111 if (status == 0) 1112 return MXGEFW_MAX_MTU - MXGEFW_PAD; 1113 1114 /* otherwise, we're limited to MJUMPAGESIZE */ 1115 return MJUMPAGESIZE - MXGEFW_PAD; 1116 } 1117 #endif 1118 1119 static int 1120 mxge_reset(mxge_softc_t *sc, int interrupts_setup) 1121 { 1122 struct mxge_slice_state *ss; 1123 mxge_rx_done_t *rx_done; 1124 volatile uint32_t *irq_claim; 1125 mxge_cmd_t cmd; 1126 int slice, status, rx_intr_size; 1127 1128 /* 1129 * Try to send a reset command to the card to see if it 1130 * is alive 1131 */ 1132 memset(&cmd, 0, sizeof (cmd)); 1133 status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd); 1134 if (status != 0) { 1135 if_printf(sc->ifp, "failed reset\n"); 1136 return ENXIO; 1137 } 1138 1139 mxge_dummy_rdma(sc, 1); 1140 1141 /* 1142 * Set the intrq size 1143 * XXX assume 4byte mcp_slot 1144 */ 1145 rx_intr_size = sc->rx_intr_slots * sizeof(mcp_slot_t); 1146 cmd.data0 = rx_intr_size; 1147 status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd); 1148 1149 /* 1150 * Even though we already know how many slices are supported 1151 * via mxge_slice_probe(), MXGEFW_CMD_GET_MAX_RSS_QUEUES 1152 * has magic side effects, and must be called after a reset. 1153 * It must be called prior to calling any RSS related cmds, 1154 * including assigning an interrupt queue for anything but 1155 * slice 0. It must also be called *after* 1156 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by 1157 * the firmware to compute offsets. 1158 */ 1159 if (sc->num_slices > 1) { 1160 /* Ask the maximum number of slices it supports */ 1161 status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd); 1162 if (status != 0) { 1163 if_printf(sc->ifp, "failed to get number of slices\n"); 1164 return status; 1165 } 1166 1167 /* 1168 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior 1169 * to setting up the interrupt queue DMA 1170 */ 1171 cmd.data0 = sc->num_slices; 1172 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; 1173 if (sc->num_tx_rings > 1) 1174 cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES; 1175 status = mxge_send_cmd(sc, MXGEFW_CMD_ENABLE_RSS_QUEUES, &cmd); 1176 if (status != 0) { 1177 if_printf(sc->ifp, "failed to set number of slices\n"); 1178 return status; 1179 } 1180 } 1181 1182 if (interrupts_setup) { 1183 /* Now exchange information about interrupts */ 1184 for (slice = 0; slice < sc->num_slices; slice++) { 1185 ss = &sc->ss[slice]; 1186 1187 rx_done = &ss->rx_data.rx_done; 1188 memset(rx_done->entry, 0, rx_intr_size); 1189 1190 cmd.data0 = 1191 MXGE_LOWPART_TO_U32(ss->rx_done_dma.dmem_busaddr); 1192 cmd.data1 = 1193 MXGE_HIGHPART_TO_U32(ss->rx_done_dma.dmem_busaddr); 1194 cmd.data2 = slice; 1195 status |= mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_DMA, 1196 &cmd); 1197 } 1198 } 1199 1200 status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, 1201 &cmd); 1202 sc->intr_coal_delay_ptr = (volatile uint32_t *)(sc->sram + cmd.data0); 1203 1204 status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd); 1205 irq_claim = (volatile uint32_t *)(sc->sram + cmd.data0); 1206 1207 status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd); 1208 sc->irq_deassert = (volatile uint32_t *)(sc->sram + cmd.data0); 1209 1210 if (status != 0) { 1211 if_printf(sc->ifp, "failed set interrupt parameters\n"); 1212 return status; 1213 } 1214 1215 *sc->intr_coal_delay_ptr = htobe32(sc->intr_coal_delay); 1216 1217 /* Run a DMA benchmark */ 1218 mxge_dma_test(sc, MXGEFW_DMA_TEST); 1219 1220 for (slice = 0; slice < sc->num_slices; slice++) { 1221 ss = &sc->ss[slice]; 1222 1223 ss->irq_claim = irq_claim + (2 * slice); 1224 1225 /* Reset mcp/driver shared state back to 0 */ 1226 ss->rx_data.rx_done.idx = 0; 1227 ss->tx.req = 0; 1228 ss->tx.done = 0; 1229 ss->tx.pkt_done = 0; 1230 ss->tx.queue_active = 0; 1231 ss->tx.activate = 0; 1232 ss->tx.deactivate = 0; 1233 ss->rx_data.rx_big.cnt = 0; 1234 ss->rx_data.rx_small.cnt = 0; 1235 if (ss->fw_stats != NULL) 1236 bzero(ss->fw_stats, sizeof(*ss->fw_stats)); 1237 } 1238 sc->rdma_tags_available = 15; 1239 1240 status = mxge_update_mac_address(sc); 1241 mxge_change_promisc(sc, sc->ifp->if_flags & IFF_PROMISC); 1242 mxge_change_pause(sc, sc->pause); 1243 mxge_set_multicast_list(sc); 1244 1245 if (sc->throttle) { 1246 cmd.data0 = sc->throttle; 1247 if (mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR, &cmd)) 1248 if_printf(sc->ifp, "can't enable throttle\n"); 1249 } 1250 return status; 1251 } 1252 1253 static int 1254 mxge_change_throttle(SYSCTL_HANDLER_ARGS) 1255 { 1256 mxge_cmd_t cmd; 1257 mxge_softc_t *sc; 1258 int err; 1259 unsigned int throttle; 1260 1261 sc = arg1; 1262 throttle = sc->throttle; 1263 err = sysctl_handle_int(oidp, &throttle, arg2, req); 1264 if (err != 0) 1265 return err; 1266 1267 if (throttle == sc->throttle) 1268 return 0; 1269 1270 if (throttle < MXGE_MIN_THROTTLE || throttle > MXGE_MAX_THROTTLE) 1271 return EINVAL; 1272 1273 ifnet_serialize_all(sc->ifp); 1274 1275 cmd.data0 = throttle; 1276 err = mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR, &cmd); 1277 if (err == 0) 1278 sc->throttle = throttle; 1279 1280 ifnet_deserialize_all(sc->ifp); 1281 return err; 1282 } 1283 1284 static int 1285 mxge_change_use_rss(SYSCTL_HANDLER_ARGS) 1286 { 1287 mxge_softc_t *sc; 1288 int err, use_rss; 1289 1290 sc = arg1; 1291 use_rss = sc->use_rss; 1292 err = sysctl_handle_int(oidp, &use_rss, arg2, req); 1293 if (err != 0) 1294 return err; 1295 1296 if (use_rss == sc->use_rss) 1297 return 0; 1298 1299 ifnet_serialize_all(sc->ifp); 1300 1301 sc->use_rss = use_rss; 1302 if (sc->ifp->if_flags & IFF_RUNNING) { 1303 mxge_close(sc, 0); 1304 mxge_open(sc); 1305 } 1306 1307 ifnet_deserialize_all(sc->ifp); 1308 return err; 1309 } 1310 1311 static int 1312 mxge_change_intr_coal(SYSCTL_HANDLER_ARGS) 1313 { 1314 mxge_softc_t *sc; 1315 unsigned int intr_coal_delay; 1316 int err; 1317 1318 sc = arg1; 1319 intr_coal_delay = sc->intr_coal_delay; 1320 err = sysctl_handle_int(oidp, &intr_coal_delay, arg2, req); 1321 if (err != 0) 1322 return err; 1323 1324 if (intr_coal_delay == sc->intr_coal_delay) 1325 return 0; 1326 1327 if (intr_coal_delay == 0 || intr_coal_delay > 1000*1000) 1328 return EINVAL; 1329 1330 ifnet_serialize_all(sc->ifp); 1331 1332 *sc->intr_coal_delay_ptr = htobe32(intr_coal_delay); 1333 sc->intr_coal_delay = intr_coal_delay; 1334 1335 ifnet_deserialize_all(sc->ifp); 1336 return err; 1337 } 1338 1339 static int 1340 mxge_change_flow_control(SYSCTL_HANDLER_ARGS) 1341 { 1342 mxge_softc_t *sc; 1343 unsigned int enabled; 1344 int err; 1345 1346 sc = arg1; 1347 enabled = sc->pause; 1348 err = sysctl_handle_int(oidp, &enabled, arg2, req); 1349 if (err != 0) 1350 return err; 1351 1352 if (enabled == sc->pause) 1353 return 0; 1354 1355 ifnet_serialize_all(sc->ifp); 1356 err = mxge_change_pause(sc, enabled); 1357 ifnet_deserialize_all(sc->ifp); 1358 1359 return err; 1360 } 1361 1362 static int 1363 mxge_handle_be32(SYSCTL_HANDLER_ARGS) 1364 { 1365 int err; 1366 1367 if (arg1 == NULL) 1368 return EFAULT; 1369 arg2 = be32toh(*(int *)arg1); 1370 arg1 = NULL; 1371 err = sysctl_handle_int(oidp, arg1, arg2, req); 1372 1373 return err; 1374 } 1375 1376 static void 1377 mxge_rem_sysctls(mxge_softc_t *sc) 1378 { 1379 if (sc->ss != NULL) { 1380 struct mxge_slice_state *ss; 1381 int slice; 1382 1383 for (slice = 0; slice < sc->num_slices; slice++) { 1384 ss = &sc->ss[slice]; 1385 if (ss->sysctl_tree != NULL) { 1386 sysctl_ctx_free(&ss->sysctl_ctx); 1387 ss->sysctl_tree = NULL; 1388 } 1389 } 1390 } 1391 1392 if (sc->slice_sysctl_tree != NULL) { 1393 sysctl_ctx_free(&sc->slice_sysctl_ctx); 1394 sc->slice_sysctl_tree = NULL; 1395 } 1396 } 1397 1398 static void 1399 mxge_add_sysctls(mxge_softc_t *sc) 1400 { 1401 struct sysctl_ctx_list *ctx; 1402 struct sysctl_oid_list *children; 1403 mcp_irq_data_t *fw; 1404 struct mxge_slice_state *ss; 1405 int slice; 1406 char slice_num[8]; 1407 1408 ctx = device_get_sysctl_ctx(sc->dev); 1409 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 1410 fw = sc->ss[0].fw_stats; 1411 1412 /* 1413 * Random information 1414 */ 1415 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 1416 CTLFLAG_RD, &sc->fw_version, 0, "firmware version"); 1417 1418 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "serial_number", 1419 CTLFLAG_RD, &sc->serial_number_string, 0, "serial number"); 1420 1421 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "product_code", 1422 CTLFLAG_RD, &sc->product_code_string, 0, "product code"); 1423 1424 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcie_link_width", 1425 CTLFLAG_RD, &sc->link_width, 0, "link width"); 1426 1427 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_boundary", 1428 CTLFLAG_RD, &sc->tx_boundary, 0, "tx boundary"); 1429 1430 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "write_combine", 1431 CTLFLAG_RD, &sc->wc, 0, "write combining PIO"); 1432 1433 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "read_dma_MBs", 1434 CTLFLAG_RD, &sc->read_dma, 0, "DMA Read speed in MB/s"); 1435 1436 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "write_dma_MBs", 1437 CTLFLAG_RD, &sc->write_dma, 0, "DMA Write speed in MB/s"); 1438 1439 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "read_write_dma_MBs", 1440 CTLFLAG_RD, &sc->read_write_dma, 0, 1441 "DMA concurrent Read/Write speed in MB/s"); 1442 1443 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "watchdog_resets", 1444 CTLFLAG_RD, &sc->watchdog_resets, 0, 1445 "Number of times NIC was reset"); 1446 1447 /* 1448 * Performance related tunables 1449 */ 1450 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_delay", 1451 CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_intr_coal, "I", 1452 "Interrupt coalescing delay in usecs"); 1453 1454 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "throttle", 1455 CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_throttle, "I", 1456 "Transmit throttling"); 1457 1458 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "flow_control_enabled", 1459 CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_flow_control, "I", 1460 "Interrupt coalescing delay in usecs"); 1461 1462 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "use_rss", 1463 CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_use_rss, "I", 1464 "Use RSS"); 1465 1466 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "deassert_wait", 1467 CTLFLAG_RW, &mxge_deassert_wait, 0, 1468 "Wait for IRQ line to go low in ihandler"); 1469 1470 /* 1471 * Stats block from firmware is in network byte order. 1472 * Need to swap it 1473 */ 1474 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "link_up", 1475 CTLTYPE_INT|CTLFLAG_RD, &fw->link_up, 0, 1476 mxge_handle_be32, "I", "link up"); 1477 1478 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_tags_available", 1479 CTLTYPE_INT|CTLFLAG_RD, &fw->rdma_tags_available, 0, 1480 mxge_handle_be32, "I", "rdma_tags_available"); 1481 1482 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_bad_crc32", 1483 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_bad_crc32, 0, 1484 mxge_handle_be32, "I", "dropped_bad_crc32"); 1485 1486 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_bad_phy", 1487 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_bad_phy, 0, 1488 mxge_handle_be32, "I", "dropped_bad_phy"); 1489 1490 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_link_error_or_filtered", 1491 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_link_error_or_filtered, 0, 1492 mxge_handle_be32, "I", "dropped_link_error_or_filtered"); 1493 1494 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_link_overflow", 1495 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_link_overflow, 0, 1496 mxge_handle_be32, "I", "dropped_link_overflow"); 1497 1498 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_multicast_filtered", 1499 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_multicast_filtered, 0, 1500 mxge_handle_be32, "I", "dropped_multicast_filtered"); 1501 1502 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_no_big_buffer", 1503 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_no_big_buffer, 0, 1504 mxge_handle_be32, "I", "dropped_no_big_buffer"); 1505 1506 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_no_small_buffer", 1507 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_no_small_buffer, 0, 1508 mxge_handle_be32, "I", "dropped_no_small_buffer"); 1509 1510 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_overrun", 1511 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_overrun, 0, 1512 mxge_handle_be32, "I", "dropped_overrun"); 1513 1514 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_pause", 1515 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_pause, 0, 1516 mxge_handle_be32, "I", "dropped_pause"); 1517 1518 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_runt", 1519 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_runt, 0, 1520 mxge_handle_be32, "I", "dropped_runt"); 1521 1522 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_unicast_filtered", 1523 CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_unicast_filtered, 0, 1524 mxge_handle_be32, "I", "dropped_unicast_filtered"); 1525 1526 /* add counters exported for debugging from all slices */ 1527 sysctl_ctx_init(&sc->slice_sysctl_ctx); 1528 sc->slice_sysctl_tree = SYSCTL_ADD_NODE(&sc->slice_sysctl_ctx, 1529 children, OID_AUTO, "slice", CTLFLAG_RD, 0, ""); 1530 if (sc->slice_sysctl_tree == NULL) { 1531 device_printf(sc->dev, "can't add slice sysctl node\n"); 1532 return; 1533 } 1534 1535 for (slice = 0; slice < sc->num_slices; slice++) { 1536 ss = &sc->ss[slice]; 1537 sysctl_ctx_init(&ss->sysctl_ctx); 1538 ctx = &ss->sysctl_ctx; 1539 children = SYSCTL_CHILDREN(sc->slice_sysctl_tree); 1540 ksprintf(slice_num, "%d", slice); 1541 ss->sysctl_tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, 1542 slice_num, CTLFLAG_RD, 0, ""); 1543 if (ss->sysctl_tree == NULL) { 1544 device_printf(sc->dev, 1545 "can't add %d slice sysctl node\n", slice); 1546 return; /* XXX continue? */ 1547 } 1548 children = SYSCTL_CHILDREN(ss->sysctl_tree); 1549 1550 /* 1551 * XXX change to ULONG 1552 */ 1553 1554 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_small_cnt", 1555 CTLFLAG_RD, &ss->rx_data.rx_small.cnt, 0, "rx_small_cnt"); 1556 1557 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_big_cnt", 1558 CTLFLAG_RD, &ss->rx_data.rx_big.cnt, 0, "rx_small_cnt"); 1559 1560 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_req", 1561 CTLFLAG_RD, &ss->tx.req, 0, "tx_req"); 1562 1563 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_done", 1564 CTLFLAG_RD, &ss->tx.done, 0, "tx_done"); 1565 1566 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pkt_done", 1567 CTLFLAG_RD, &ss->tx.pkt_done, 0, "tx_done"); 1568 1569 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_queue_active", 1570 CTLFLAG_RD, &ss->tx.queue_active, 0, "tx_queue_active"); 1571 1572 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_activate", 1573 CTLFLAG_RD, &ss->tx.activate, 0, "tx_activate"); 1574 1575 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_deactivate", 1576 CTLFLAG_RD, &ss->tx.deactivate, 0, "tx_deactivate"); 1577 } 1578 } 1579 1580 /* 1581 * Copy an array of mcp_kreq_ether_send_t's to the mcp. Copy 1582 * backwards one at a time and handle ring wraps 1583 */ 1584 static __inline void 1585 mxge_submit_req_backwards(mxge_tx_ring_t *tx, 1586 mcp_kreq_ether_send_t *src, int cnt) 1587 { 1588 int idx, starting_slot; 1589 1590 starting_slot = tx->req; 1591 while (cnt > 1) { 1592 cnt--; 1593 idx = (starting_slot + cnt) & tx->mask; 1594 mxge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src)); 1595 wmb(); 1596 } 1597 } 1598 1599 /* 1600 * Copy an array of mcp_kreq_ether_send_t's to the mcp. Copy 1601 * at most 32 bytes at a time, so as to avoid involving the software 1602 * pio handler in the nic. We re-write the first segment's flags 1603 * to mark them valid only after writing the entire chain 1604 */ 1605 static __inline void 1606 mxge_submit_req(mxge_tx_ring_t *tx, mcp_kreq_ether_send_t *src, int cnt) 1607 { 1608 int idx, i; 1609 uint32_t *src_ints; 1610 volatile uint32_t *dst_ints; 1611 mcp_kreq_ether_send_t *srcp; 1612 volatile mcp_kreq_ether_send_t *dstp, *dst; 1613 uint8_t last_flags; 1614 1615 idx = tx->req & tx->mask; 1616 1617 last_flags = src->flags; 1618 src->flags = 0; 1619 wmb(); 1620 dst = dstp = &tx->lanai[idx]; 1621 srcp = src; 1622 1623 if ((idx + cnt) < tx->mask) { 1624 for (i = 0; i < cnt - 1; i += 2) { 1625 mxge_pio_copy(dstp, srcp, 2 * sizeof(*src)); 1626 wmb(); /* force write every 32 bytes */ 1627 srcp += 2; 1628 dstp += 2; 1629 } 1630 } else { 1631 /* 1632 * Submit all but the first request, and ensure 1633 * that it is submitted below 1634 */ 1635 mxge_submit_req_backwards(tx, src, cnt); 1636 i = 0; 1637 } 1638 if (i < cnt) { 1639 /* Submit the first request */ 1640 mxge_pio_copy(dstp, srcp, sizeof(*src)); 1641 wmb(); /* barrier before setting valid flag */ 1642 } 1643 1644 /* Re-write the last 32-bits with the valid flags */ 1645 src->flags = last_flags; 1646 src_ints = (uint32_t *)src; 1647 src_ints+=3; 1648 dst_ints = (volatile uint32_t *)dst; 1649 dst_ints+=3; 1650 *dst_ints = *src_ints; 1651 tx->req += cnt; 1652 wmb(); 1653 } 1654 1655 static int 1656 mxge_pullup_tso(struct mbuf **mp) 1657 { 1658 int hoff, iphlen, thoff; 1659 struct mbuf *m; 1660 1661 m = *mp; 1662 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 1663 1664 iphlen = m->m_pkthdr.csum_iphlen; 1665 thoff = m->m_pkthdr.csum_thlen; 1666 hoff = m->m_pkthdr.csum_lhlen; 1667 1668 KASSERT(iphlen > 0, ("invalid ip hlen")); 1669 KASSERT(thoff > 0, ("invalid tcp hlen")); 1670 KASSERT(hoff > 0, ("invalid ether hlen")); 1671 1672 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 1673 m = m_pullup(m, hoff + iphlen + thoff); 1674 if (m == NULL) { 1675 *mp = NULL; 1676 return ENOBUFS; 1677 } 1678 *mp = m; 1679 } 1680 return 0; 1681 } 1682 1683 static int 1684 mxge_encap_tso(mxge_tx_ring_t *tx, struct mxge_buffer_state *info_map, 1685 struct mbuf *m, int busdma_seg_cnt) 1686 { 1687 mcp_kreq_ether_send_t *req; 1688 bus_dma_segment_t *seg; 1689 uint32_t low, high_swapped; 1690 int len, seglen, cum_len, cum_len_next; 1691 int next_is_first, chop, cnt, rdma_count, small; 1692 uint16_t pseudo_hdr_offset, cksum_offset, mss; 1693 uint8_t flags, flags_next; 1694 struct mxge_buffer_state *info_last; 1695 bus_dmamap_t map = info_map->map; 1696 1697 mss = m->m_pkthdr.tso_segsz; 1698 1699 /* 1700 * Negative cum_len signifies to the send loop that we are 1701 * still in the header portion of the TSO packet. 1702 */ 1703 cum_len = -(m->m_pkthdr.csum_lhlen + m->m_pkthdr.csum_iphlen + 1704 m->m_pkthdr.csum_thlen); 1705 1706 /* 1707 * TSO implies checksum offload on this hardware 1708 */ 1709 cksum_offset = m->m_pkthdr.csum_lhlen + m->m_pkthdr.csum_iphlen; 1710 flags = MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST; 1711 1712 /* 1713 * For TSO, pseudo_hdr_offset holds mss. The firmware figures 1714 * out where to put the checksum by parsing the header. 1715 */ 1716 pseudo_hdr_offset = htobe16(mss); 1717 1718 req = tx->req_list; 1719 seg = tx->seg_list; 1720 cnt = 0; 1721 rdma_count = 0; 1722 1723 /* 1724 * "rdma_count" is the number of RDMAs belonging to the current 1725 * packet BEFORE the current send request. For non-TSO packets, 1726 * this is equal to "count". 1727 * 1728 * For TSO packets, rdma_count needs to be reset to 0 after a 1729 * segment cut. 1730 * 1731 * The rdma_count field of the send request is the number of 1732 * RDMAs of the packet starting at that request. For TSO send 1733 * requests with one ore more cuts in the middle, this is the 1734 * number of RDMAs starting after the last cut in the request. 1735 * All previous segments before the last cut implicitly have 1 1736 * RDMA. 1737 * 1738 * Since the number of RDMAs is not known beforehand, it must be 1739 * filled-in retroactively - after each segmentation cut or at 1740 * the end of the entire packet. 1741 */ 1742 1743 while (busdma_seg_cnt) { 1744 /* 1745 * Break the busdma segment up into pieces 1746 */ 1747 low = MXGE_LOWPART_TO_U32(seg->ds_addr); 1748 high_swapped = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); 1749 len = seg->ds_len; 1750 1751 while (len) { 1752 flags_next = flags & ~MXGEFW_FLAGS_FIRST; 1753 seglen = len; 1754 cum_len_next = cum_len + seglen; 1755 (req - rdma_count)->rdma_count = rdma_count + 1; 1756 if (__predict_true(cum_len >= 0)) { 1757 /* Payload */ 1758 chop = (cum_len_next > mss); 1759 cum_len_next = cum_len_next % mss; 1760 next_is_first = (cum_len_next == 0); 1761 flags |= chop * MXGEFW_FLAGS_TSO_CHOP; 1762 flags_next |= 1763 next_is_first * MXGEFW_FLAGS_FIRST; 1764 rdma_count |= -(chop | next_is_first); 1765 rdma_count += chop & !next_is_first; 1766 } else if (cum_len_next >= 0) { 1767 /* Header ends */ 1768 rdma_count = -1; 1769 cum_len_next = 0; 1770 seglen = -cum_len; 1771 small = (mss <= MXGEFW_SEND_SMALL_SIZE); 1772 flags_next = MXGEFW_FLAGS_TSO_PLD | 1773 MXGEFW_FLAGS_FIRST | 1774 (small * MXGEFW_FLAGS_SMALL); 1775 } 1776 1777 req->addr_high = high_swapped; 1778 req->addr_low = htobe32(low); 1779 req->pseudo_hdr_offset = pseudo_hdr_offset; 1780 req->pad = 0; 1781 req->rdma_count = 1; 1782 req->length = htobe16(seglen); 1783 req->cksum_offset = cksum_offset; 1784 req->flags = 1785 flags | ((cum_len & 1) * MXGEFW_FLAGS_ALIGN_ODD); 1786 low += seglen; 1787 len -= seglen; 1788 cum_len = cum_len_next; 1789 flags = flags_next; 1790 req++; 1791 cnt++; 1792 rdma_count++; 1793 if (__predict_false(cksum_offset > seglen)) 1794 cksum_offset -= seglen; 1795 else 1796 cksum_offset = 0; 1797 if (__predict_false(cnt > tx->max_desc)) 1798 goto drop; 1799 } 1800 busdma_seg_cnt--; 1801 seg++; 1802 } 1803 (req - rdma_count)->rdma_count = rdma_count; 1804 1805 do { 1806 req--; 1807 req->flags |= MXGEFW_FLAGS_TSO_LAST; 1808 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | MXGEFW_FLAGS_FIRST))); 1809 1810 info_last = &tx->info[((cnt - 1) + tx->req) & tx->mask]; 1811 1812 info_map->map = info_last->map; 1813 info_last->map = map; 1814 info_last->m = m; 1815 1816 mxge_submit_req(tx, tx->req_list, cnt); 1817 1818 if (tx->send_go != NULL && tx->queue_active == 0) { 1819 /* Tell the NIC to start polling this slice */ 1820 *tx->send_go = 1; 1821 tx->queue_active = 1; 1822 tx->activate++; 1823 wmb(); 1824 } 1825 return 0; 1826 1827 drop: 1828 bus_dmamap_unload(tx->dmat, tx->info[tx->req & tx->mask].map); 1829 m_freem(m); 1830 return ENOBUFS; 1831 } 1832 1833 static int 1834 mxge_encap(mxge_tx_ring_t *tx, struct mbuf *m, bus_addr_t zeropad) 1835 { 1836 mcp_kreq_ether_send_t *req; 1837 bus_dma_segment_t *seg; 1838 bus_dmamap_t map; 1839 int cnt, cum_len, err, i, idx, odd_flag; 1840 uint16_t pseudo_hdr_offset; 1841 uint8_t flags, cksum_offset; 1842 struct mxge_buffer_state *info_map, *info_last; 1843 1844 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1845 err = mxge_pullup_tso(&m); 1846 if (__predict_false(err)) 1847 return err; 1848 } 1849 1850 /* 1851 * Map the frame for DMA 1852 */ 1853 idx = tx->req & tx->mask; 1854 info_map = &tx->info[idx]; 1855 map = info_map->map; 1856 1857 err = bus_dmamap_load_mbuf_defrag(tx->dmat, map, &m, 1858 tx->seg_list, tx->max_desc - 2, &cnt, BUS_DMA_NOWAIT); 1859 if (__predict_false(err != 0)) 1860 goto drop; 1861 bus_dmamap_sync(tx->dmat, map, BUS_DMASYNC_PREWRITE); 1862 1863 /* 1864 * TSO is different enough, we handle it in another routine 1865 */ 1866 if (m->m_pkthdr.csum_flags & CSUM_TSO) 1867 return mxge_encap_tso(tx, info_map, m, cnt); 1868 1869 req = tx->req_list; 1870 cksum_offset = 0; 1871 pseudo_hdr_offset = 0; 1872 flags = MXGEFW_FLAGS_NO_TSO; 1873 1874 /* 1875 * Checksum offloading 1876 */ 1877 if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 1878 cksum_offset = m->m_pkthdr.csum_lhlen + m->m_pkthdr.csum_iphlen; 1879 pseudo_hdr_offset = cksum_offset + m->m_pkthdr.csum_data; 1880 pseudo_hdr_offset = htobe16(pseudo_hdr_offset); 1881 req->cksum_offset = cksum_offset; 1882 flags |= MXGEFW_FLAGS_CKSUM; 1883 odd_flag = MXGEFW_FLAGS_ALIGN_ODD; 1884 } else { 1885 odd_flag = 0; 1886 } 1887 if (m->m_pkthdr.len < MXGEFW_SEND_SMALL_SIZE) 1888 flags |= MXGEFW_FLAGS_SMALL; 1889 1890 /* 1891 * Convert segments into a request list 1892 */ 1893 cum_len = 0; 1894 seg = tx->seg_list; 1895 req->flags = MXGEFW_FLAGS_FIRST; 1896 for (i = 0; i < cnt; i++) { 1897 req->addr_low = htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr)); 1898 req->addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); 1899 req->length = htobe16(seg->ds_len); 1900 req->cksum_offset = cksum_offset; 1901 if (cksum_offset > seg->ds_len) 1902 cksum_offset -= seg->ds_len; 1903 else 1904 cksum_offset = 0; 1905 req->pseudo_hdr_offset = pseudo_hdr_offset; 1906 req->pad = 0; /* complete solid 16-byte block */ 1907 req->rdma_count = 1; 1908 req->flags |= flags | ((cum_len & 1) * odd_flag); 1909 cum_len += seg->ds_len; 1910 seg++; 1911 req++; 1912 req->flags = 0; 1913 } 1914 req--; 1915 1916 /* 1917 * Pad runt to 60 bytes 1918 */ 1919 if (cum_len < 60) { 1920 req++; 1921 req->addr_low = htobe32(MXGE_LOWPART_TO_U32(zeropad)); 1922 req->addr_high = htobe32(MXGE_HIGHPART_TO_U32(zeropad)); 1923 req->length = htobe16(60 - cum_len); 1924 req->cksum_offset = 0; 1925 req->pseudo_hdr_offset = pseudo_hdr_offset; 1926 req->pad = 0; /* complete solid 16-byte block */ 1927 req->rdma_count = 1; 1928 req->flags |= flags | ((cum_len & 1) * odd_flag); 1929 cnt++; 1930 } 1931 1932 tx->req_list[0].rdma_count = cnt; 1933 #if 0 1934 /* print what the firmware will see */ 1935 for (i = 0; i < cnt; i++) { 1936 kprintf("%d: addr: 0x%x 0x%x len:%d pso%d," 1937 "cso:%d, flags:0x%x, rdma:%d\n", 1938 i, (int)ntohl(tx->req_list[i].addr_high), 1939 (int)ntohl(tx->req_list[i].addr_low), 1940 (int)ntohs(tx->req_list[i].length), 1941 (int)ntohs(tx->req_list[i].pseudo_hdr_offset), 1942 tx->req_list[i].cksum_offset, tx->req_list[i].flags, 1943 tx->req_list[i].rdma_count); 1944 } 1945 kprintf("--------------\n"); 1946 #endif 1947 info_last = &tx->info[((cnt - 1) + tx->req) & tx->mask]; 1948 1949 info_map->map = info_last->map; 1950 info_last->map = map; 1951 info_last->m = m; 1952 1953 mxge_submit_req(tx, tx->req_list, cnt); 1954 1955 if (tx->send_go != NULL && tx->queue_active == 0) { 1956 /* Tell the NIC to start polling this slice */ 1957 *tx->send_go = 1; 1958 tx->queue_active = 1; 1959 tx->activate++; 1960 wmb(); 1961 } 1962 return 0; 1963 1964 drop: 1965 m_freem(m); 1966 return err; 1967 } 1968 1969 static void 1970 mxge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1971 { 1972 mxge_softc_t *sc = ifp->if_softc; 1973 mxge_tx_ring_t *tx = ifsq_get_priv(ifsq); 1974 bus_addr_t zeropad; 1975 int encap = 0; 1976 1977 KKASSERT(tx->ifsq == ifsq); 1978 ASSERT_SERIALIZED(&tx->tx_serialize); 1979 1980 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 1981 return; 1982 1983 zeropad = sc->zeropad_dma.dmem_busaddr; 1984 while (tx->mask - (tx->req - tx->done) > tx->max_desc) { 1985 struct mbuf *m; 1986 int error; 1987 1988 m = ifsq_dequeue(ifsq); 1989 if (m == NULL) 1990 goto done; 1991 1992 BPF_MTAP(ifp, m); 1993 error = mxge_encap(tx, m, zeropad); 1994 if (!error) 1995 encap = 1; 1996 else 1997 IFNET_STAT_INC(ifp, oerrors, 1); 1998 } 1999 2000 /* Ran out of transmit slots */ 2001 ifsq_set_oactive(ifsq); 2002 done: 2003 if (encap) 2004 tx->watchdog.wd_timer = 5; 2005 } 2006 2007 static void 2008 mxge_watchdog(struct ifaltq_subque *ifsq) 2009 { 2010 struct ifnet *ifp = ifsq_get_ifp(ifsq); 2011 struct mxge_softc *sc = ifp->if_softc; 2012 uint32_t rx_pause = be32toh(sc->ss->fw_stats->dropped_pause); 2013 mxge_tx_ring_t *tx = ifsq_get_priv(ifsq); 2014 2015 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2016 2017 /* Check for pause blocking before resetting */ 2018 if (tx->watchdog_rx_pause == rx_pause) { 2019 mxge_warn_stuck(sc, tx, 0); 2020 mxge_watchdog_reset(sc); 2021 return; 2022 } else { 2023 if_printf(ifp, "Flow control blocking xmits, " 2024 "check link partner\n"); 2025 } 2026 tx->watchdog_rx_pause = rx_pause; 2027 } 2028 2029 /* 2030 * Copy an array of mcp_kreq_ether_recv_t's to the mcp. Copy 2031 * at most 32 bytes at a time, so as to avoid involving the software 2032 * pio handler in the nic. We re-write the first segment's low 2033 * DMA address to mark it valid only after we write the entire chunk 2034 * in a burst 2035 */ 2036 static __inline void 2037 mxge_submit_8rx(volatile mcp_kreq_ether_recv_t *dst, 2038 mcp_kreq_ether_recv_t *src) 2039 { 2040 uint32_t low; 2041 2042 low = src->addr_low; 2043 src->addr_low = 0xffffffff; 2044 mxge_pio_copy(dst, src, 4 * sizeof (*src)); 2045 wmb(); 2046 mxge_pio_copy(dst + 4, src + 4, 4 * sizeof (*src)); 2047 wmb(); 2048 src->addr_low = low; 2049 dst->addr_low = low; 2050 wmb(); 2051 } 2052 2053 static int 2054 mxge_get_buf_small(mxge_rx_ring_t *rx, bus_dmamap_t map, int idx, 2055 boolean_t init) 2056 { 2057 bus_dma_segment_t seg; 2058 struct mbuf *m; 2059 int cnt, err, mflag; 2060 2061 mflag = MB_DONTWAIT; 2062 if (__predict_false(init)) 2063 mflag = MB_WAIT; 2064 2065 m = m_gethdr(mflag, MT_DATA); 2066 if (m == NULL) { 2067 err = ENOBUFS; 2068 if (__predict_false(init)) { 2069 /* 2070 * During initialization, there 2071 * is nothing to setup; bail out 2072 */ 2073 return err; 2074 } 2075 goto done; 2076 } 2077 m->m_len = m->m_pkthdr.len = MHLEN; 2078 2079 err = bus_dmamap_load_mbuf_segment(rx->dmat, map, m, 2080 &seg, 1, &cnt, BUS_DMA_NOWAIT); 2081 if (err != 0) { 2082 m_freem(m); 2083 if (__predict_false(init)) { 2084 /* 2085 * During initialization, there 2086 * is nothing to setup; bail out 2087 */ 2088 return err; 2089 } 2090 goto done; 2091 } 2092 2093 rx->info[idx].m = m; 2094 rx->shadow[idx].addr_low = htobe32(MXGE_LOWPART_TO_U32(seg.ds_addr)); 2095 rx->shadow[idx].addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg.ds_addr)); 2096 2097 done: 2098 if ((idx & 7) == 7) 2099 mxge_submit_8rx(&rx->lanai[idx - 7], &rx->shadow[idx - 7]); 2100 return err; 2101 } 2102 2103 static int 2104 mxge_get_buf_big(mxge_rx_ring_t *rx, bus_dmamap_t map, int idx, 2105 boolean_t init) 2106 { 2107 bus_dma_segment_t seg; 2108 struct mbuf *m; 2109 int cnt, err, mflag; 2110 2111 mflag = MB_DONTWAIT; 2112 if (__predict_false(init)) 2113 mflag = MB_WAIT; 2114 2115 if (rx->cl_size == MCLBYTES) 2116 m = m_getcl(mflag, MT_DATA, M_PKTHDR); 2117 else 2118 m = m_getjcl(mflag, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 2119 if (m == NULL) { 2120 err = ENOBUFS; 2121 if (__predict_false(init)) { 2122 /* 2123 * During initialization, there 2124 * is nothing to setup; bail out 2125 */ 2126 return err; 2127 } 2128 goto done; 2129 } 2130 m->m_len = m->m_pkthdr.len = rx->cl_size; 2131 2132 err = bus_dmamap_load_mbuf_segment(rx->dmat, map, m, 2133 &seg, 1, &cnt, BUS_DMA_NOWAIT); 2134 if (err != 0) { 2135 m_freem(m); 2136 if (__predict_false(init)) { 2137 /* 2138 * During initialization, there 2139 * is nothing to setup; bail out 2140 */ 2141 return err; 2142 } 2143 goto done; 2144 } 2145 2146 rx->info[idx].m = m; 2147 rx->shadow[idx].addr_low = htobe32(MXGE_LOWPART_TO_U32(seg.ds_addr)); 2148 rx->shadow[idx].addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg.ds_addr)); 2149 2150 done: 2151 if ((idx & 7) == 7) 2152 mxge_submit_8rx(&rx->lanai[idx - 7], &rx->shadow[idx - 7]); 2153 return err; 2154 } 2155 2156 /* 2157 * Myri10GE hardware checksums are not valid if the sender 2158 * padded the frame with non-zero padding. This is because 2159 * the firmware just does a simple 16-bit 1s complement 2160 * checksum across the entire frame, excluding the first 14 2161 * bytes. It is best to simply to check the checksum and 2162 * tell the stack about it only if the checksum is good 2163 */ 2164 static __inline uint16_t 2165 mxge_rx_csum(struct mbuf *m, int csum) 2166 { 2167 const struct ether_header *eh; 2168 const struct ip *ip; 2169 uint16_t c; 2170 2171 eh = mtod(m, const struct ether_header *); 2172 2173 /* Only deal with IPv4 TCP & UDP for now */ 2174 if (__predict_false(eh->ether_type != htons(ETHERTYPE_IP))) 2175 return 1; 2176 2177 ip = (const struct ip *)(eh + 1); 2178 if (__predict_false(ip->ip_p != IPPROTO_TCP && ip->ip_p != IPPROTO_UDP)) 2179 return 1; 2180 2181 #ifdef INET 2182 c = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 2183 htonl(ntohs(csum) + ntohs(ip->ip_len) + 2184 - (ip->ip_hl << 2) + ip->ip_p)); 2185 #else 2186 c = 1; 2187 #endif 2188 c ^= 0xffff; 2189 return c; 2190 } 2191 2192 static void 2193 mxge_vlan_tag_remove(struct mbuf *m, uint32_t *csum) 2194 { 2195 struct ether_vlan_header *evl; 2196 uint32_t partial; 2197 2198 evl = mtod(m, struct ether_vlan_header *); 2199 2200 /* 2201 * Fix checksum by subtracting EVL_ENCAPLEN bytes after 2202 * what the firmware thought was the end of the ethernet 2203 * header. 2204 */ 2205 2206 /* Put checksum into host byte order */ 2207 *csum = ntohs(*csum); 2208 2209 partial = ntohl(*(uint32_t *)(mtod(m, char *) + ETHER_HDR_LEN)); 2210 *csum += ~partial; 2211 *csum += ((*csum) < ~partial); 2212 *csum = ((*csum) >> 16) + ((*csum) & 0xFFFF); 2213 *csum = ((*csum) >> 16) + ((*csum) & 0xFFFF); 2214 2215 /* 2216 * Restore checksum to network byte order; 2217 * later consumers expect this 2218 */ 2219 *csum = htons(*csum); 2220 2221 /* save the tag */ 2222 m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag); 2223 m->m_flags |= M_VLANTAG; 2224 2225 /* 2226 * Remove the 802.1q header by copying the Ethernet 2227 * addresses over it and adjusting the beginning of 2228 * the data in the mbuf. The encapsulated Ethernet 2229 * type field is already in place. 2230 */ 2231 bcopy((char *)evl, (char *)evl + EVL_ENCAPLEN, 2232 ETHER_HDR_LEN - ETHER_TYPE_LEN); 2233 m_adj(m, EVL_ENCAPLEN); 2234 } 2235 2236 2237 static __inline void 2238 mxge_rx_done_big(struct ifnet *ifp, mxge_rx_ring_t *rx, 2239 uint32_t len, uint32_t csum) 2240 { 2241 struct mbuf *m; 2242 const struct ether_header *eh; 2243 bus_dmamap_t old_map; 2244 int idx; 2245 2246 idx = rx->cnt & rx->mask; 2247 rx->cnt++; 2248 2249 /* Save a pointer to the received mbuf */ 2250 m = rx->info[idx].m; 2251 2252 /* Try to replace the received mbuf */ 2253 if (mxge_get_buf_big(rx, rx->extra_map, idx, FALSE)) { 2254 /* Drop the frame -- the old mbuf is re-cycled */ 2255 IFNET_STAT_INC(ifp, ierrors, 1); 2256 return; 2257 } 2258 2259 /* Unmap the received buffer */ 2260 old_map = rx->info[idx].map; 2261 bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD); 2262 bus_dmamap_unload(rx->dmat, old_map); 2263 2264 /* Swap the bus_dmamap_t's */ 2265 rx->info[idx].map = rx->extra_map; 2266 rx->extra_map = old_map; 2267 2268 /* 2269 * mcp implicitly skips 1st 2 bytes so that packet is properly 2270 * aligned 2271 */ 2272 m->m_data += MXGEFW_PAD; 2273 2274 m->m_pkthdr.rcvif = ifp; 2275 m->m_len = m->m_pkthdr.len = len; 2276 2277 IFNET_STAT_INC(ifp, ipackets, 1); 2278 2279 eh = mtod(m, const struct ether_header *); 2280 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2281 mxge_vlan_tag_remove(m, &csum); 2282 2283 /* If the checksum is valid, mark it in the mbuf header */ 2284 if ((ifp->if_capenable & IFCAP_RXCSUM) && 2285 mxge_rx_csum(m, csum) == 0) { 2286 /* Tell the stack that the checksum is good */ 2287 m->m_pkthdr.csum_data = 0xffff; 2288 m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR | 2289 CSUM_DATA_VALID; 2290 } 2291 ifp->if_input(ifp, m, NULL, -1); 2292 } 2293 2294 static __inline void 2295 mxge_rx_done_small(struct ifnet *ifp, mxge_rx_ring_t *rx, 2296 uint32_t len, uint32_t csum) 2297 { 2298 const struct ether_header *eh; 2299 struct mbuf *m; 2300 bus_dmamap_t old_map; 2301 int idx; 2302 2303 idx = rx->cnt & rx->mask; 2304 rx->cnt++; 2305 2306 /* Save a pointer to the received mbuf */ 2307 m = rx->info[idx].m; 2308 2309 /* Try to replace the received mbuf */ 2310 if (mxge_get_buf_small(rx, rx->extra_map, idx, FALSE)) { 2311 /* Drop the frame -- the old mbuf is re-cycled */ 2312 IFNET_STAT_INC(ifp, ierrors, 1); 2313 return; 2314 } 2315 2316 /* Unmap the received buffer */ 2317 old_map = rx->info[idx].map; 2318 bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD); 2319 bus_dmamap_unload(rx->dmat, old_map); 2320 2321 /* Swap the bus_dmamap_t's */ 2322 rx->info[idx].map = rx->extra_map; 2323 rx->extra_map = old_map; 2324 2325 /* 2326 * mcp implicitly skips 1st 2 bytes so that packet is properly 2327 * aligned 2328 */ 2329 m->m_data += MXGEFW_PAD; 2330 2331 m->m_pkthdr.rcvif = ifp; 2332 m->m_len = m->m_pkthdr.len = len; 2333 2334 IFNET_STAT_INC(ifp, ipackets, 1); 2335 2336 eh = mtod(m, const struct ether_header *); 2337 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2338 mxge_vlan_tag_remove(m, &csum); 2339 2340 /* If the checksum is valid, mark it in the mbuf header */ 2341 if ((ifp->if_capenable & IFCAP_RXCSUM) && 2342 mxge_rx_csum(m, csum) == 0) { 2343 /* Tell the stack that the checksum is good */ 2344 m->m_pkthdr.csum_data = 0xffff; 2345 m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR | 2346 CSUM_DATA_VALID; 2347 } 2348 ifp->if_input(ifp, m, NULL, -1); 2349 } 2350 2351 static __inline void 2352 mxge_clean_rx_done(struct ifnet *ifp, struct mxge_rx_data *rx_data, int cycle) 2353 { 2354 mxge_rx_done_t *rx_done = &rx_data->rx_done; 2355 2356 while (rx_done->entry[rx_done->idx].length != 0 && cycle != 0) { 2357 uint16_t length, checksum; 2358 2359 length = ntohs(rx_done->entry[rx_done->idx].length); 2360 rx_done->entry[rx_done->idx].length = 0; 2361 2362 checksum = rx_done->entry[rx_done->idx].checksum; 2363 2364 if (length <= MXGE_RX_SMALL_BUFLEN) { 2365 mxge_rx_done_small(ifp, &rx_data->rx_small, 2366 length, checksum); 2367 } else { 2368 mxge_rx_done_big(ifp, &rx_data->rx_big, 2369 length, checksum); 2370 } 2371 2372 rx_done->idx++; 2373 rx_done->idx &= rx_done->mask; 2374 --cycle; 2375 } 2376 } 2377 2378 static __inline void 2379 mxge_tx_done(struct ifnet *ifp, mxge_tx_ring_t *tx, uint32_t mcp_idx) 2380 { 2381 ASSERT_SERIALIZED(&tx->tx_serialize); 2382 2383 while (tx->pkt_done != mcp_idx) { 2384 struct mbuf *m; 2385 int idx; 2386 2387 idx = tx->done & tx->mask; 2388 tx->done++; 2389 2390 m = tx->info[idx].m; 2391 /* 2392 * mbuf and DMA map only attached to the first 2393 * segment per-mbuf. 2394 */ 2395 if (m != NULL) { 2396 tx->pkt_done++; 2397 IFNET_STAT_INC(ifp, opackets, 1); 2398 tx->info[idx].m = NULL; 2399 bus_dmamap_unload(tx->dmat, tx->info[idx].map); 2400 m_freem(m); 2401 } 2402 } 2403 2404 /* 2405 * If we have space, clear OACTIVE to tell the stack that 2406 * its OK to send packets 2407 */ 2408 if (tx->req - tx->done < (tx->mask + 1) / 2) { 2409 ifsq_clr_oactive(tx->ifsq); 2410 if (tx->req == tx->done) { 2411 /* Reset watchdog */ 2412 tx->watchdog.wd_timer = 0; 2413 } 2414 } 2415 2416 if (!ifsq_is_empty(tx->ifsq)) 2417 ifsq_devstart(tx->ifsq); 2418 2419 if (tx->send_stop != NULL && tx->req == tx->done) { 2420 /* 2421 * Let the NIC stop polling this queue, since there 2422 * are no more transmits pending 2423 */ 2424 *tx->send_stop = 1; 2425 tx->queue_active = 0; 2426 tx->deactivate++; 2427 wmb(); 2428 } 2429 } 2430 2431 static struct mxge_media_type mxge_xfp_media_types[] = { 2432 {IFM_10G_CX4, 0x7f, "10GBASE-CX4 (module)"}, 2433 {IFM_10G_SR, (1 << 7), "10GBASE-SR"}, 2434 {IFM_10G_LR, (1 << 6), "10GBASE-LR"}, 2435 {0, (1 << 5), "10GBASE-ER"}, 2436 {IFM_10G_LRM, (1 << 4), "10GBASE-LRM"}, 2437 {0, (1 << 3), "10GBASE-SW"}, 2438 {0, (1 << 2), "10GBASE-LW"}, 2439 {0, (1 << 1), "10GBASE-EW"}, 2440 {0, (1 << 0), "Reserved"} 2441 }; 2442 2443 static struct mxge_media_type mxge_sfp_media_types[] = { 2444 {IFM_10G_TWINAX, 0, "10GBASE-Twinax"}, 2445 {0, (1 << 7), "Reserved"}, 2446 {IFM_10G_LRM, (1 << 6), "10GBASE-LRM"}, 2447 {IFM_10G_LR, (1 << 5), "10GBASE-LR"}, 2448 {IFM_10G_SR, (1 << 4), "10GBASE-SR"}, 2449 {IFM_10G_TWINAX,(1 << 0), "10GBASE-Twinax"} 2450 }; 2451 2452 static void 2453 mxge_media_set(mxge_softc_t *sc, int media_type) 2454 { 2455 ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | media_type, 0, NULL); 2456 ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | media_type); 2457 sc->current_media = media_type; 2458 sc->media.ifm_media = sc->media.ifm_cur->ifm_media; 2459 } 2460 2461 static void 2462 mxge_media_init(mxge_softc_t *sc) 2463 { 2464 const char *ptr; 2465 int i; 2466 2467 ifmedia_removeall(&sc->media); 2468 mxge_media_set(sc, IFM_AUTO); 2469 2470 /* 2471 * Parse the product code to deterimine the interface type 2472 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character 2473 * after the 3rd dash in the driver's cached copy of the 2474 * EEPROM's product code string. 2475 */ 2476 ptr = sc->product_code_string; 2477 if (ptr == NULL) { 2478 if_printf(sc->ifp, "Missing product code\n"); 2479 return; 2480 } 2481 2482 for (i = 0; i < 3; i++, ptr++) { 2483 ptr = strchr(ptr, '-'); 2484 if (ptr == NULL) { 2485 if_printf(sc->ifp, "only %d dashes in PC?!?\n", i); 2486 return; 2487 } 2488 } 2489 if (*ptr == 'C' || *(ptr +1) == 'C') { 2490 /* -C is CX4 */ 2491 sc->connector = MXGE_CX4; 2492 mxge_media_set(sc, IFM_10G_CX4); 2493 } else if (*ptr == 'Q') { 2494 /* -Q is Quad Ribbon Fiber */ 2495 sc->connector = MXGE_QRF; 2496 if_printf(sc->ifp, "Quad Ribbon Fiber Media\n"); 2497 /* DragonFly has no media type for Quad ribbon fiber */ 2498 } else if (*ptr == 'R') { 2499 /* -R is XFP */ 2500 sc->connector = MXGE_XFP; 2501 } else if (*ptr == 'S' || *(ptr +1) == 'S') { 2502 /* -S or -2S is SFP+ */ 2503 sc->connector = MXGE_SFP; 2504 } else { 2505 if_printf(sc->ifp, "Unknown media type: %c\n", *ptr); 2506 } 2507 } 2508 2509 /* 2510 * Determine the media type for a NIC. Some XFPs will identify 2511 * themselves only when their link is up, so this is initiated via a 2512 * link up interrupt. However, this can potentially take up to 2513 * several milliseconds, so it is run via the watchdog routine, rather 2514 * than in the interrupt handler itself. 2515 */ 2516 static void 2517 mxge_media_probe(mxge_softc_t *sc) 2518 { 2519 mxge_cmd_t cmd; 2520 const char *cage_type; 2521 struct mxge_media_type *mxge_media_types = NULL; 2522 int i, err, ms, mxge_media_type_entries; 2523 uint32_t byte; 2524 2525 sc->need_media_probe = 0; 2526 2527 if (sc->connector == MXGE_XFP) { 2528 /* -R is XFP */ 2529 mxge_media_types = mxge_xfp_media_types; 2530 mxge_media_type_entries = NELEM(mxge_xfp_media_types); 2531 byte = MXGE_XFP_COMPLIANCE_BYTE; 2532 cage_type = "XFP"; 2533 } else if (sc->connector == MXGE_SFP) { 2534 /* -S or -2S is SFP+ */ 2535 mxge_media_types = mxge_sfp_media_types; 2536 mxge_media_type_entries = NELEM(mxge_sfp_media_types); 2537 cage_type = "SFP+"; 2538 byte = 3; 2539 } else { 2540 /* nothing to do; media type cannot change */ 2541 return; 2542 } 2543 2544 /* 2545 * At this point we know the NIC has an XFP cage, so now we 2546 * try to determine what is in the cage by using the 2547 * firmware's XFP I2C commands to read the XFP 10GbE compilance 2548 * register. We read just one byte, which may take over 2549 * a millisecond 2550 */ 2551 2552 cmd.data0 = 0; /* just fetch 1 byte, not all 256 */ 2553 cmd.data1 = byte; 2554 err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_READ, &cmd); 2555 if (err == MXGEFW_CMD_ERROR_I2C_FAILURE) 2556 if_printf(sc->ifp, "failed to read XFP\n"); 2557 if (err == MXGEFW_CMD_ERROR_I2C_ABSENT) 2558 if_printf(sc->ifp, "Type R/S with no XFP!?!?\n"); 2559 if (err != MXGEFW_CMD_OK) 2560 return; 2561 2562 /* Now we wait for the data to be cached */ 2563 cmd.data0 = byte; 2564 err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); 2565 for (ms = 0; err == EBUSY && ms < 50; ms++) { 2566 DELAY(1000); 2567 cmd.data0 = byte; 2568 err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); 2569 } 2570 if (err != MXGEFW_CMD_OK) { 2571 if_printf(sc->ifp, "failed to read %s (%d, %dms)\n", 2572 cage_type, err, ms); 2573 return; 2574 } 2575 2576 if (cmd.data0 == mxge_media_types[0].bitmask) { 2577 if (bootverbose) { 2578 if_printf(sc->ifp, "%s:%s\n", cage_type, 2579 mxge_media_types[0].name); 2580 } 2581 if (sc->current_media != mxge_media_types[0].flag) { 2582 mxge_media_init(sc); 2583 mxge_media_set(sc, mxge_media_types[0].flag); 2584 } 2585 return; 2586 } 2587 for (i = 1; i < mxge_media_type_entries; i++) { 2588 if (cmd.data0 & mxge_media_types[i].bitmask) { 2589 if (bootverbose) { 2590 if_printf(sc->ifp, "%s:%s\n", cage_type, 2591 mxge_media_types[i].name); 2592 } 2593 2594 if (sc->current_media != mxge_media_types[i].flag) { 2595 mxge_media_init(sc); 2596 mxge_media_set(sc, mxge_media_types[i].flag); 2597 } 2598 return; 2599 } 2600 } 2601 if (bootverbose) { 2602 if_printf(sc->ifp, "%s media 0x%x unknown\n", cage_type, 2603 cmd.data0); 2604 } 2605 } 2606 2607 static void 2608 mxge_intr_status(struct mxge_softc *sc, const mcp_irq_data_t *stats) 2609 { 2610 if (sc->link_state != stats->link_up) { 2611 sc->link_state = stats->link_up; 2612 if (sc->link_state) { 2613 sc->ifp->if_link_state = LINK_STATE_UP; 2614 if_link_state_change(sc->ifp); 2615 if (bootverbose) 2616 if_printf(sc->ifp, "link up\n"); 2617 } else { 2618 sc->ifp->if_link_state = LINK_STATE_DOWN; 2619 if_link_state_change(sc->ifp); 2620 if (bootverbose) 2621 if_printf(sc->ifp, "link down\n"); 2622 } 2623 sc->need_media_probe = 1; 2624 } 2625 2626 if (sc->rdma_tags_available != be32toh(stats->rdma_tags_available)) { 2627 sc->rdma_tags_available = be32toh(stats->rdma_tags_available); 2628 if_printf(sc->ifp, "RDMA timed out! %d tags left\n", 2629 sc->rdma_tags_available); 2630 } 2631 2632 if (stats->link_down) { 2633 sc->down_cnt += stats->link_down; 2634 sc->link_state = 0; 2635 sc->ifp->if_link_state = LINK_STATE_DOWN; 2636 if_link_state_change(sc->ifp); 2637 } 2638 } 2639 2640 static void 2641 mxge_serialize_skipmain(struct mxge_softc *sc) 2642 { 2643 lwkt_serialize_array_enter(sc->serializes, sc->nserialize, 1); 2644 } 2645 2646 static void 2647 mxge_deserialize_skipmain(struct mxge_softc *sc) 2648 { 2649 lwkt_serialize_array_exit(sc->serializes, sc->nserialize, 1); 2650 } 2651 2652 static void 2653 mxge_legacy(void *arg) 2654 { 2655 struct mxge_slice_state *ss = arg; 2656 mxge_softc_t *sc = ss->sc; 2657 mcp_irq_data_t *stats = ss->fw_stats; 2658 mxge_tx_ring_t *tx = &ss->tx; 2659 mxge_rx_done_t *rx_done = &ss->rx_data.rx_done; 2660 uint32_t send_done_count; 2661 uint8_t valid; 2662 2663 ASSERT_SERIALIZED(&sc->main_serialize); 2664 2665 /* Make sure the DMA has finished */ 2666 if (!stats->valid) 2667 return; 2668 valid = stats->valid; 2669 2670 /* Lower legacy IRQ */ 2671 *sc->irq_deassert = 0; 2672 if (!mxge_deassert_wait) { 2673 /* Don't wait for conf. that irq is low */ 2674 stats->valid = 0; 2675 } 2676 2677 mxge_serialize_skipmain(sc); 2678 2679 /* 2680 * Loop while waiting for legacy irq deassertion 2681 * XXX do we really want to loop? 2682 */ 2683 do { 2684 /* Check for transmit completes and receives */ 2685 send_done_count = be32toh(stats->send_done_count); 2686 while ((send_done_count != tx->pkt_done) || 2687 (rx_done->entry[rx_done->idx].length != 0)) { 2688 if (send_done_count != tx->pkt_done) { 2689 mxge_tx_done(&sc->arpcom.ac_if, tx, 2690 (int)send_done_count); 2691 } 2692 mxge_clean_rx_done(&sc->arpcom.ac_if, &ss->rx_data, -1); 2693 send_done_count = be32toh(stats->send_done_count); 2694 } 2695 if (mxge_deassert_wait) 2696 wmb(); 2697 } while (*((volatile uint8_t *)&stats->valid)); 2698 2699 mxge_deserialize_skipmain(sc); 2700 2701 /* Fw link & error stats meaningful only on the first slice */ 2702 if (__predict_false(stats->stats_updated)) 2703 mxge_intr_status(sc, stats); 2704 2705 /* Check to see if we have rx token to pass back */ 2706 if (valid & 0x1) 2707 *ss->irq_claim = be32toh(3); 2708 *(ss->irq_claim + 1) = be32toh(3); 2709 } 2710 2711 static void 2712 mxge_msi(void *arg) 2713 { 2714 struct mxge_slice_state *ss = arg; 2715 mxge_softc_t *sc = ss->sc; 2716 mcp_irq_data_t *stats = ss->fw_stats; 2717 mxge_tx_ring_t *tx = &ss->tx; 2718 mxge_rx_done_t *rx_done = &ss->rx_data.rx_done; 2719 uint32_t send_done_count; 2720 uint8_t valid; 2721 #ifndef IFPOLL_ENABLE 2722 const boolean_t polling = FALSE; 2723 #else 2724 boolean_t polling = FALSE; 2725 #endif 2726 2727 ASSERT_SERIALIZED(&sc->main_serialize); 2728 2729 /* Make sure the DMA has finished */ 2730 if (__predict_false(!stats->valid)) 2731 return; 2732 2733 valid = stats->valid; 2734 stats->valid = 0; 2735 2736 #ifdef IFPOLL_ENABLE 2737 if (sc->arpcom.ac_if.if_flags & IFF_NPOLLING) 2738 polling = TRUE; 2739 #endif 2740 2741 if (!polling) { 2742 /* Check for receives */ 2743 lwkt_serialize_enter(&ss->rx_data.rx_serialize); 2744 if (rx_done->entry[rx_done->idx].length != 0) 2745 mxge_clean_rx_done(&sc->arpcom.ac_if, &ss->rx_data, -1); 2746 lwkt_serialize_exit(&ss->rx_data.rx_serialize); 2747 } 2748 2749 /* 2750 * Check for transmit completes 2751 * 2752 * NOTE: 2753 * Since pkt_done is only changed by mxge_tx_done(), 2754 * which is called only in interrupt handler, the 2755 * check w/o holding tx serializer is MPSAFE. 2756 */ 2757 send_done_count = be32toh(stats->send_done_count); 2758 if (send_done_count != tx->pkt_done) { 2759 lwkt_serialize_enter(&tx->tx_serialize); 2760 mxge_tx_done(&sc->arpcom.ac_if, tx, (int)send_done_count); 2761 lwkt_serialize_exit(&tx->tx_serialize); 2762 } 2763 2764 if (__predict_false(stats->stats_updated)) 2765 mxge_intr_status(sc, stats); 2766 2767 /* Check to see if we have rx token to pass back */ 2768 if (!polling && (valid & 0x1)) 2769 *ss->irq_claim = be32toh(3); 2770 *(ss->irq_claim + 1) = be32toh(3); 2771 } 2772 2773 static void 2774 mxge_msix_rx(void *arg) 2775 { 2776 struct mxge_slice_state *ss = arg; 2777 mxge_rx_done_t *rx_done = &ss->rx_data.rx_done; 2778 2779 #ifdef IFPOLL_ENABLE 2780 if (ss->sc->arpcom.ac_if.if_flags & IFF_NPOLLING) 2781 return; 2782 #endif 2783 2784 ASSERT_SERIALIZED(&ss->rx_data.rx_serialize); 2785 2786 if (rx_done->entry[rx_done->idx].length != 0) 2787 mxge_clean_rx_done(&ss->sc->arpcom.ac_if, &ss->rx_data, -1); 2788 2789 *ss->irq_claim = be32toh(3); 2790 } 2791 2792 static void 2793 mxge_msix_rxtx(void *arg) 2794 { 2795 struct mxge_slice_state *ss = arg; 2796 mxge_softc_t *sc = ss->sc; 2797 mcp_irq_data_t *stats = ss->fw_stats; 2798 mxge_tx_ring_t *tx = &ss->tx; 2799 mxge_rx_done_t *rx_done = &ss->rx_data.rx_done; 2800 uint32_t send_done_count; 2801 uint8_t valid; 2802 #ifndef IFPOLL_ENABLE 2803 const boolean_t polling = FALSE; 2804 #else 2805 boolean_t polling = FALSE; 2806 #endif 2807 2808 ASSERT_SERIALIZED(&ss->rx_data.rx_serialize); 2809 2810 /* Make sure the DMA has finished */ 2811 if (__predict_false(!stats->valid)) 2812 return; 2813 2814 valid = stats->valid; 2815 stats->valid = 0; 2816 2817 #ifdef IFPOLL_ENABLE 2818 if (sc->arpcom.ac_if.if_flags & IFF_NPOLLING) 2819 polling = TRUE; 2820 #endif 2821 2822 /* Check for receives */ 2823 if (!polling && rx_done->entry[rx_done->idx].length != 0) 2824 mxge_clean_rx_done(&sc->arpcom.ac_if, &ss->rx_data, -1); 2825 2826 /* 2827 * Check for transmit completes 2828 * 2829 * NOTE: 2830 * Since pkt_done is only changed by mxge_tx_done(), 2831 * which is called only in interrupt handler, the 2832 * check w/o holding tx serializer is MPSAFE. 2833 */ 2834 send_done_count = be32toh(stats->send_done_count); 2835 if (send_done_count != tx->pkt_done) { 2836 lwkt_serialize_enter(&tx->tx_serialize); 2837 mxge_tx_done(&sc->arpcom.ac_if, tx, (int)send_done_count); 2838 lwkt_serialize_exit(&tx->tx_serialize); 2839 } 2840 2841 /* Check to see if we have rx token to pass back */ 2842 if (!polling && (valid & 0x1)) 2843 *ss->irq_claim = be32toh(3); 2844 *(ss->irq_claim + 1) = be32toh(3); 2845 } 2846 2847 static void 2848 mxge_init(void *arg) 2849 { 2850 struct mxge_softc *sc = arg; 2851 2852 ASSERT_IFNET_SERIALIZED_ALL(sc->ifp); 2853 if ((sc->ifp->if_flags & IFF_RUNNING) == 0) 2854 mxge_open(sc); 2855 } 2856 2857 static void 2858 mxge_free_slice_mbufs(struct mxge_slice_state *ss) 2859 { 2860 int i; 2861 2862 for (i = 0; i <= ss->rx_data.rx_big.mask; i++) { 2863 if (ss->rx_data.rx_big.info[i].m == NULL) 2864 continue; 2865 bus_dmamap_unload(ss->rx_data.rx_big.dmat, 2866 ss->rx_data.rx_big.info[i].map); 2867 m_freem(ss->rx_data.rx_big.info[i].m); 2868 ss->rx_data.rx_big.info[i].m = NULL; 2869 } 2870 2871 for (i = 0; i <= ss->rx_data.rx_small.mask; i++) { 2872 if (ss->rx_data.rx_small.info[i].m == NULL) 2873 continue; 2874 bus_dmamap_unload(ss->rx_data.rx_small.dmat, 2875 ss->rx_data.rx_small.info[i].map); 2876 m_freem(ss->rx_data.rx_small.info[i].m); 2877 ss->rx_data.rx_small.info[i].m = NULL; 2878 } 2879 2880 /* Transmit ring used only on the first slice */ 2881 if (ss->tx.info == NULL) 2882 return; 2883 2884 for (i = 0; i <= ss->tx.mask; i++) { 2885 if (ss->tx.info[i].m == NULL) 2886 continue; 2887 bus_dmamap_unload(ss->tx.dmat, ss->tx.info[i].map); 2888 m_freem(ss->tx.info[i].m); 2889 ss->tx.info[i].m = NULL; 2890 } 2891 } 2892 2893 static void 2894 mxge_free_mbufs(mxge_softc_t *sc) 2895 { 2896 int slice; 2897 2898 for (slice = 0; slice < sc->num_slices; slice++) 2899 mxge_free_slice_mbufs(&sc->ss[slice]); 2900 } 2901 2902 static void 2903 mxge_free_slice_rings(struct mxge_slice_state *ss) 2904 { 2905 int i; 2906 2907 if (ss->rx_data.rx_done.entry != NULL) { 2908 mxge_dma_free(&ss->rx_done_dma); 2909 ss->rx_data.rx_done.entry = NULL; 2910 } 2911 2912 if (ss->tx.req_list != NULL) { 2913 kfree(ss->tx.req_list, M_DEVBUF); 2914 ss->tx.req_list = NULL; 2915 } 2916 2917 if (ss->tx.seg_list != NULL) { 2918 kfree(ss->tx.seg_list, M_DEVBUF); 2919 ss->tx.seg_list = NULL; 2920 } 2921 2922 if (ss->rx_data.rx_small.shadow != NULL) { 2923 kfree(ss->rx_data.rx_small.shadow, M_DEVBUF); 2924 ss->rx_data.rx_small.shadow = NULL; 2925 } 2926 2927 if (ss->rx_data.rx_big.shadow != NULL) { 2928 kfree(ss->rx_data.rx_big.shadow, M_DEVBUF); 2929 ss->rx_data.rx_big.shadow = NULL; 2930 } 2931 2932 if (ss->tx.info != NULL) { 2933 if (ss->tx.dmat != NULL) { 2934 for (i = 0; i <= ss->tx.mask; i++) { 2935 bus_dmamap_destroy(ss->tx.dmat, 2936 ss->tx.info[i].map); 2937 } 2938 bus_dma_tag_destroy(ss->tx.dmat); 2939 } 2940 kfree(ss->tx.info, M_DEVBUF); 2941 ss->tx.info = NULL; 2942 } 2943 2944 if (ss->rx_data.rx_small.info != NULL) { 2945 if (ss->rx_data.rx_small.dmat != NULL) { 2946 for (i = 0; i <= ss->rx_data.rx_small.mask; i++) { 2947 bus_dmamap_destroy(ss->rx_data.rx_small.dmat, 2948 ss->rx_data.rx_small.info[i].map); 2949 } 2950 bus_dmamap_destroy(ss->rx_data.rx_small.dmat, 2951 ss->rx_data.rx_small.extra_map); 2952 bus_dma_tag_destroy(ss->rx_data.rx_small.dmat); 2953 } 2954 kfree(ss->rx_data.rx_small.info, M_DEVBUF); 2955 ss->rx_data.rx_small.info = NULL; 2956 } 2957 2958 if (ss->rx_data.rx_big.info != NULL) { 2959 if (ss->rx_data.rx_big.dmat != NULL) { 2960 for (i = 0; i <= ss->rx_data.rx_big.mask; i++) { 2961 bus_dmamap_destroy(ss->rx_data.rx_big.dmat, 2962 ss->rx_data.rx_big.info[i].map); 2963 } 2964 bus_dmamap_destroy(ss->rx_data.rx_big.dmat, 2965 ss->rx_data.rx_big.extra_map); 2966 bus_dma_tag_destroy(ss->rx_data.rx_big.dmat); 2967 } 2968 kfree(ss->rx_data.rx_big.info, M_DEVBUF); 2969 ss->rx_data.rx_big.info = NULL; 2970 } 2971 } 2972 2973 static void 2974 mxge_free_rings(mxge_softc_t *sc) 2975 { 2976 int slice; 2977 2978 if (sc->ss == NULL) 2979 return; 2980 2981 for (slice = 0; slice < sc->num_slices; slice++) 2982 mxge_free_slice_rings(&sc->ss[slice]); 2983 } 2984 2985 static int 2986 mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries, 2987 int tx_ring_entries) 2988 { 2989 mxge_softc_t *sc = ss->sc; 2990 size_t bytes; 2991 int err, i; 2992 2993 /* 2994 * Allocate per-slice receive resources 2995 */ 2996 2997 ss->rx_data.rx_small.mask = ss->rx_data.rx_big.mask = 2998 rx_ring_entries - 1; 2999 ss->rx_data.rx_done.mask = (2 * rx_ring_entries) - 1; 3000 3001 /* Allocate the rx shadow rings */ 3002 bytes = rx_ring_entries * sizeof(*ss->rx_data.rx_small.shadow); 3003 ss->rx_data.rx_small.shadow = kmalloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); 3004 3005 bytes = rx_ring_entries * sizeof(*ss->rx_data.rx_big.shadow); 3006 ss->rx_data.rx_big.shadow = kmalloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); 3007 3008 /* Allocate the rx host info rings */ 3009 bytes = rx_ring_entries * sizeof(*ss->rx_data.rx_small.info); 3010 ss->rx_data.rx_small.info = kmalloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); 3011 3012 bytes = rx_ring_entries * sizeof(*ss->rx_data.rx_big.info); 3013 ss->rx_data.rx_big.info = kmalloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); 3014 3015 /* Allocate the rx busdma resources */ 3016 err = bus_dma_tag_create(sc->parent_dmat, /* parent */ 3017 1, /* alignment */ 3018 4096, /* boundary */ 3019 BUS_SPACE_MAXADDR, /* low */ 3020 BUS_SPACE_MAXADDR, /* high */ 3021 NULL, NULL, /* filter */ 3022 MHLEN, /* maxsize */ 3023 1, /* num segs */ 3024 MHLEN, /* maxsegsize */ 3025 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 3026 /* flags */ 3027 &ss->rx_data.rx_small.dmat); /* tag */ 3028 if (err != 0) { 3029 device_printf(sc->dev, "Err %d allocating rx_small dmat\n", 3030 err); 3031 return err; 3032 } 3033 3034 err = bus_dmamap_create(ss->rx_data.rx_small.dmat, BUS_DMA_WAITOK, 3035 &ss->rx_data.rx_small.extra_map); 3036 if (err != 0) { 3037 device_printf(sc->dev, "Err %d extra rx_small dmamap\n", err); 3038 bus_dma_tag_destroy(ss->rx_data.rx_small.dmat); 3039 ss->rx_data.rx_small.dmat = NULL; 3040 return err; 3041 } 3042 for (i = 0; i <= ss->rx_data.rx_small.mask; i++) { 3043 err = bus_dmamap_create(ss->rx_data.rx_small.dmat, 3044 BUS_DMA_WAITOK, &ss->rx_data.rx_small.info[i].map); 3045 if (err != 0) { 3046 int j; 3047 3048 device_printf(sc->dev, "Err %d rx_small dmamap\n", err); 3049 3050 for (j = 0; j < i; ++j) { 3051 bus_dmamap_destroy(ss->rx_data.rx_small.dmat, 3052 ss->rx_data.rx_small.info[j].map); 3053 } 3054 bus_dmamap_destroy(ss->rx_data.rx_small.dmat, 3055 ss->rx_data.rx_small.extra_map); 3056 bus_dma_tag_destroy(ss->rx_data.rx_small.dmat); 3057 ss->rx_data.rx_small.dmat = NULL; 3058 return err; 3059 } 3060 } 3061 3062 err = bus_dma_tag_create(sc->parent_dmat, /* parent */ 3063 1, /* alignment */ 3064 4096, /* boundary */ 3065 BUS_SPACE_MAXADDR, /* low */ 3066 BUS_SPACE_MAXADDR, /* high */ 3067 NULL, NULL, /* filter */ 3068 4096, /* maxsize */ 3069 1, /* num segs */ 3070 4096, /* maxsegsize*/ 3071 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 3072 /* flags */ 3073 &ss->rx_data.rx_big.dmat); /* tag */ 3074 if (err != 0) { 3075 device_printf(sc->dev, "Err %d allocating rx_big dmat\n", 3076 err); 3077 return err; 3078 } 3079 3080 err = bus_dmamap_create(ss->rx_data.rx_big.dmat, BUS_DMA_WAITOK, 3081 &ss->rx_data.rx_big.extra_map); 3082 if (err != 0) { 3083 device_printf(sc->dev, "Err %d extra rx_big dmamap\n", err); 3084 bus_dma_tag_destroy(ss->rx_data.rx_big.dmat); 3085 ss->rx_data.rx_big.dmat = NULL; 3086 return err; 3087 } 3088 for (i = 0; i <= ss->rx_data.rx_big.mask; i++) { 3089 err = bus_dmamap_create(ss->rx_data.rx_big.dmat, BUS_DMA_WAITOK, 3090 &ss->rx_data.rx_big.info[i].map); 3091 if (err != 0) { 3092 int j; 3093 3094 device_printf(sc->dev, "Err %d rx_big dmamap\n", err); 3095 for (j = 0; j < i; ++j) { 3096 bus_dmamap_destroy(ss->rx_data.rx_big.dmat, 3097 ss->rx_data.rx_big.info[j].map); 3098 } 3099 bus_dmamap_destroy(ss->rx_data.rx_big.dmat, 3100 ss->rx_data.rx_big.extra_map); 3101 bus_dma_tag_destroy(ss->rx_data.rx_big.dmat); 3102 ss->rx_data.rx_big.dmat = NULL; 3103 return err; 3104 } 3105 } 3106 3107 /* 3108 * Now allocate TX resources 3109 */ 3110 3111 ss->tx.mask = tx_ring_entries - 1; 3112 ss->tx.max_desc = MIN(MXGE_MAX_SEND_DESC, tx_ring_entries / 4); 3113 3114 /* 3115 * Allocate the tx request copy block; MUST be at least 8 bytes 3116 * aligned 3117 */ 3118 bytes = sizeof(*ss->tx.req_list) * (ss->tx.max_desc + 4); 3119 ss->tx.req_list = kmalloc_cachealign(__VM_CACHELINE_ALIGN(bytes), 3120 M_DEVBUF, M_WAITOK); 3121 3122 /* Allocate the tx busdma segment list */ 3123 bytes = sizeof(*ss->tx.seg_list) * ss->tx.max_desc; 3124 ss->tx.seg_list = kmalloc(bytes, M_DEVBUF, M_WAITOK); 3125 3126 /* Allocate the tx host info ring */ 3127 bytes = tx_ring_entries * sizeof(*ss->tx.info); 3128 ss->tx.info = kmalloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); 3129 3130 /* Allocate the tx busdma resources */ 3131 err = bus_dma_tag_create(sc->parent_dmat, /* parent */ 3132 1, /* alignment */ 3133 sc->tx_boundary, /* boundary */ 3134 BUS_SPACE_MAXADDR, /* low */ 3135 BUS_SPACE_MAXADDR, /* high */ 3136 NULL, NULL, /* filter */ 3137 IP_MAXPACKET + 3138 sizeof(struct ether_vlan_header), 3139 /* maxsize */ 3140 ss->tx.max_desc - 2, /* num segs */ 3141 sc->tx_boundary, /* maxsegsz */ 3142 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 3143 BUS_DMA_ONEBPAGE, /* flags */ 3144 &ss->tx.dmat); /* tag */ 3145 if (err != 0) { 3146 device_printf(sc->dev, "Err %d allocating tx dmat\n", err); 3147 return err; 3148 } 3149 3150 /* 3151 * Now use these tags to setup DMA maps for each slot in the ring 3152 */ 3153 for (i = 0; i <= ss->tx.mask; i++) { 3154 err = bus_dmamap_create(ss->tx.dmat, 3155 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &ss->tx.info[i].map); 3156 if (err != 0) { 3157 int j; 3158 3159 device_printf(sc->dev, "Err %d tx dmamap\n", err); 3160 for (j = 0; j < i; ++j) { 3161 bus_dmamap_destroy(ss->tx.dmat, 3162 ss->tx.info[j].map); 3163 } 3164 bus_dma_tag_destroy(ss->tx.dmat); 3165 ss->tx.dmat = NULL; 3166 return err; 3167 } 3168 } 3169 return 0; 3170 } 3171 3172 static int 3173 mxge_alloc_rings(mxge_softc_t *sc) 3174 { 3175 mxge_cmd_t cmd; 3176 int tx_ring_size; 3177 int tx_ring_entries, rx_ring_entries; 3178 int err, slice; 3179 3180 /* Get ring sizes */ 3181 err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd); 3182 if (err != 0) { 3183 device_printf(sc->dev, "Cannot determine tx ring sizes\n"); 3184 return err; 3185 } 3186 tx_ring_size = cmd.data0; 3187 3188 tx_ring_entries = tx_ring_size / sizeof(mcp_kreq_ether_send_t); 3189 rx_ring_entries = sc->rx_intr_slots / 2; 3190 3191 if (bootverbose) { 3192 device_printf(sc->dev, "tx desc %d, rx desc %d\n", 3193 tx_ring_entries, rx_ring_entries); 3194 } 3195 3196 ifq_set_maxlen(&sc->ifp->if_snd, tx_ring_entries - 1); 3197 ifq_set_ready(&sc->ifp->if_snd); 3198 ifq_set_subq_cnt(&sc->ifp->if_snd, sc->num_tx_rings); 3199 3200 if (sc->num_tx_rings > 1) { 3201 sc->ifp->if_mapsubq = ifq_mapsubq_mask; 3202 ifq_set_subq_mask(&sc->ifp->if_snd, sc->num_tx_rings - 1); 3203 } 3204 3205 for (slice = 0; slice < sc->num_slices; slice++) { 3206 err = mxge_alloc_slice_rings(&sc->ss[slice], 3207 rx_ring_entries, tx_ring_entries); 3208 if (err != 0) { 3209 device_printf(sc->dev, 3210 "alloc %d slice rings failed\n", slice); 3211 return err; 3212 } 3213 } 3214 return 0; 3215 } 3216 3217 static void 3218 mxge_choose_params(int mtu, int *cl_size) 3219 { 3220 int bufsize = mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + MXGEFW_PAD; 3221 3222 if (bufsize < MCLBYTES) { 3223 *cl_size = MCLBYTES; 3224 } else { 3225 KASSERT(bufsize < MJUMPAGESIZE, ("invalid MTU %d", mtu)); 3226 *cl_size = MJUMPAGESIZE; 3227 } 3228 } 3229 3230 static int 3231 mxge_slice_open(struct mxge_slice_state *ss, int cl_size) 3232 { 3233 mxge_cmd_t cmd; 3234 int err, i, slice; 3235 3236 slice = ss - ss->sc->ss; 3237 3238 /* 3239 * Get the lanai pointers to the send and receive rings 3240 */ 3241 err = 0; 3242 3243 if (ss->sc->num_tx_rings == 1) { 3244 if (slice == 0) { 3245 cmd.data0 = slice; 3246 err = mxge_send_cmd(ss->sc, MXGEFW_CMD_GET_SEND_OFFSET, 3247 &cmd); 3248 ss->tx.lanai = (volatile mcp_kreq_ether_send_t *) 3249 (ss->sc->sram + cmd.data0); 3250 /* Leave send_go and send_stop as NULL */ 3251 } 3252 } else { 3253 cmd.data0 = slice; 3254 err = mxge_send_cmd(ss->sc, MXGEFW_CMD_GET_SEND_OFFSET, &cmd); 3255 ss->tx.lanai = (volatile mcp_kreq_ether_send_t *) 3256 (ss->sc->sram + cmd.data0); 3257 ss->tx.send_go = (volatile uint32_t *) 3258 (ss->sc->sram + MXGEFW_ETH_SEND_GO + 64 * slice); 3259 ss->tx.send_stop = (volatile uint32_t *) 3260 (ss->sc->sram + MXGEFW_ETH_SEND_STOP + 64 * slice); 3261 } 3262 3263 cmd.data0 = slice; 3264 err |= mxge_send_cmd(ss->sc, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd); 3265 ss->rx_data.rx_small.lanai = 3266 (volatile mcp_kreq_ether_recv_t *)(ss->sc->sram + cmd.data0); 3267 3268 cmd.data0 = slice; 3269 err |= mxge_send_cmd(ss->sc, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd); 3270 ss->rx_data.rx_big.lanai = 3271 (volatile mcp_kreq_ether_recv_t *)(ss->sc->sram + cmd.data0); 3272 3273 if (err != 0) { 3274 if_printf(ss->sc->ifp, 3275 "failed to get ring sizes or locations\n"); 3276 return EIO; 3277 } 3278 3279 /* 3280 * Stock small receive ring 3281 */ 3282 for (i = 0; i <= ss->rx_data.rx_small.mask; i++) { 3283 err = mxge_get_buf_small(&ss->rx_data.rx_small, 3284 ss->rx_data.rx_small.info[i].map, i, TRUE); 3285 if (err) { 3286 if_printf(ss->sc->ifp, "alloced %d/%d smalls\n", i, 3287 ss->rx_data.rx_small.mask + 1); 3288 return ENOMEM; 3289 } 3290 } 3291 3292 /* 3293 * Stock big receive ring 3294 */ 3295 for (i = 0; i <= ss->rx_data.rx_big.mask; i++) { 3296 ss->rx_data.rx_big.shadow[i].addr_low = 0xffffffff; 3297 ss->rx_data.rx_big.shadow[i].addr_high = 0xffffffff; 3298 } 3299 3300 ss->rx_data.rx_big.cl_size = cl_size; 3301 3302 for (i = 0; i <= ss->rx_data.rx_big.mask; i++) { 3303 err = mxge_get_buf_big(&ss->rx_data.rx_big, 3304 ss->rx_data.rx_big.info[i].map, i, TRUE); 3305 if (err) { 3306 if_printf(ss->sc->ifp, "alloced %d/%d bigs\n", i, 3307 ss->rx_data.rx_big.mask + 1); 3308 return ENOMEM; 3309 } 3310 } 3311 return 0; 3312 } 3313 3314 static int 3315 mxge_open(mxge_softc_t *sc) 3316 { 3317 struct ifnet *ifp = sc->ifp; 3318 mxge_cmd_t cmd; 3319 int err, slice, cl_size, i; 3320 bus_addr_t bus; 3321 volatile uint8_t *itable; 3322 struct mxge_slice_state *ss; 3323 3324 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3325 3326 /* Copy the MAC address in case it was overridden */ 3327 bcopy(IF_LLADDR(ifp), sc->mac_addr, ETHER_ADDR_LEN); 3328 3329 err = mxge_reset(sc, 1); 3330 if (err != 0) { 3331 if_printf(ifp, "failed to reset\n"); 3332 return EIO; 3333 } 3334 3335 if (sc->num_slices > 1) { 3336 /* Setup the indirection table */ 3337 cmd.data0 = sc->num_slices; 3338 err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_TABLE_SIZE, &cmd); 3339 3340 err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_RSS_TABLE_OFFSET, &cmd); 3341 if (err != 0) { 3342 if_printf(ifp, "failed to setup rss tables\n"); 3343 return err; 3344 } 3345 3346 /* Just enable an identity mapping */ 3347 itable = sc->sram + cmd.data0; 3348 for (i = 0; i < sc->num_slices; i++) 3349 itable[i] = (uint8_t)i; 3350 3351 if (sc->use_rss) { 3352 volatile uint8_t *hwkey; 3353 uint8_t swkey[MXGE_HWRSS_KEYLEN]; 3354 3355 err = mxge_send_cmd(sc, MXGEFW_CMD_GET_RSS_KEY_OFFSET, 3356 &cmd); 3357 if (err != 0) { 3358 if_printf(ifp, "failed to get rsskey\n"); 3359 return err; 3360 } 3361 hwkey = sc->sram + cmd.data0; 3362 3363 toeplitz_get_key(swkey, MXGE_HWRSS_KEYLEN); 3364 for (i = 0; i < MXGE_HWRSS_KEYLEN; ++i) 3365 hwkey[i] = swkey[i]; 3366 wmb(); 3367 3368 err = mxge_send_cmd(sc, MXGEFW_CMD_RSS_KEY_UPDATED, 3369 &cmd); 3370 if (err != 0) { 3371 if_printf(ifp, "failed to update rsskey\n"); 3372 return err; 3373 } 3374 if (bootverbose) 3375 if_printf(ifp, "RSS key updated\n"); 3376 } 3377 3378 cmd.data0 = 1; 3379 if (sc->use_rss) { 3380 if (bootverbose) 3381 if_printf(ifp, "input hash: RSS\n"); 3382 cmd.data1 = MXGEFW_RSS_HASH_TYPE_IPV4 | 3383 MXGEFW_RSS_HASH_TYPE_TCP_IPV4; 3384 } else { 3385 if (bootverbose) 3386 if_printf(ifp, "input hash: SRC_DST_PORT\n"); 3387 cmd.data1 = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT; 3388 } 3389 err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_ENABLE, &cmd); 3390 if (err != 0) { 3391 if_printf(ifp, "failed to enable slices\n"); 3392 return err; 3393 } 3394 } 3395 3396 cmd.data0 = MXGEFW_TSO_MODE_NDIS; 3397 err = mxge_send_cmd(sc, MXGEFW_CMD_SET_TSO_MODE, &cmd); 3398 if (err) { 3399 /* 3400 * Can't change TSO mode to NDIS, never allow TSO then 3401 */ 3402 if_printf(ifp, "failed to set TSO mode\n"); 3403 ifp->if_capenable &= ~IFCAP_TSO; 3404 ifp->if_capabilities &= ~IFCAP_TSO; 3405 ifp->if_hwassist &= ~CSUM_TSO; 3406 } 3407 3408 mxge_choose_params(ifp->if_mtu, &cl_size); 3409 3410 cmd.data0 = 1; 3411 err = mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS, &cmd); 3412 /* 3413 * Error is only meaningful if we're trying to set 3414 * MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS > 1 3415 */ 3416 3417 /* 3418 * Give the firmware the mtu and the big and small buffer 3419 * sizes. The firmware wants the big buf size to be a power 3420 * of two. Luckily, DragonFly's clusters are powers of two 3421 */ 3422 cmd.data0 = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN; 3423 err = mxge_send_cmd(sc, MXGEFW_CMD_SET_MTU, &cmd); 3424 3425 cmd.data0 = MXGE_RX_SMALL_BUFLEN; 3426 err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd); 3427 3428 cmd.data0 = cl_size; 3429 err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd); 3430 3431 if (err != 0) { 3432 if_printf(ifp, "failed to setup params\n"); 3433 goto abort; 3434 } 3435 3436 /* Now give him the pointer to the stats block */ 3437 for (slice = 0; slice < sc->num_slices; slice++) { 3438 ss = &sc->ss[slice]; 3439 cmd.data0 = MXGE_LOWPART_TO_U32(ss->fw_stats_dma.dmem_busaddr); 3440 cmd.data1 = MXGE_HIGHPART_TO_U32(ss->fw_stats_dma.dmem_busaddr); 3441 cmd.data2 = sizeof(struct mcp_irq_data); 3442 cmd.data2 |= (slice << 16); 3443 err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd); 3444 } 3445 3446 if (err != 0) { 3447 bus = sc->ss->fw_stats_dma.dmem_busaddr; 3448 bus += offsetof(struct mcp_irq_data, send_done_count); 3449 cmd.data0 = MXGE_LOWPART_TO_U32(bus); 3450 cmd.data1 = MXGE_HIGHPART_TO_U32(bus); 3451 err = mxge_send_cmd(sc, MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, 3452 &cmd); 3453 3454 /* Firmware cannot support multicast without STATS_DMA_V2 */ 3455 sc->fw_multicast_support = 0; 3456 } else { 3457 sc->fw_multicast_support = 1; 3458 } 3459 3460 if (err != 0) { 3461 if_printf(ifp, "failed to setup params\n"); 3462 goto abort; 3463 } 3464 3465 for (slice = 0; slice < sc->num_slices; slice++) { 3466 err = mxge_slice_open(&sc->ss[slice], cl_size); 3467 if (err != 0) { 3468 if_printf(ifp, "couldn't open slice %d\n", slice); 3469 goto abort; 3470 } 3471 } 3472 3473 /* Finally, start the firmware running */ 3474 err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_UP, &cmd); 3475 if (err) { 3476 if_printf(ifp, "Couldn't bring up link\n"); 3477 goto abort; 3478 } 3479 3480 ifp->if_flags |= IFF_RUNNING; 3481 for (i = 0; i < sc->num_tx_rings; ++i) { 3482 mxge_tx_ring_t *tx = &sc->ss[i].tx; 3483 3484 ifsq_clr_oactive(tx->ifsq); 3485 ifsq_watchdog_start(&tx->watchdog); 3486 } 3487 3488 return 0; 3489 3490 abort: 3491 mxge_free_mbufs(sc); 3492 return err; 3493 } 3494 3495 static void 3496 mxge_close(mxge_softc_t *sc, int down) 3497 { 3498 struct ifnet *ifp = sc->ifp; 3499 mxge_cmd_t cmd; 3500 int err, old_down_cnt, i; 3501 3502 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3503 3504 if (!down) { 3505 old_down_cnt = sc->down_cnt; 3506 wmb(); 3507 3508 err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_DOWN, &cmd); 3509 if (err) 3510 if_printf(ifp, "Couldn't bring down link\n"); 3511 3512 if (old_down_cnt == sc->down_cnt) { 3513 /* 3514 * Wait for down irq 3515 * XXX racy 3516 */ 3517 ifnet_deserialize_all(ifp); 3518 DELAY(10 * sc->intr_coal_delay); 3519 ifnet_serialize_all(ifp); 3520 } 3521 3522 wmb(); 3523 if (old_down_cnt == sc->down_cnt) 3524 if_printf(ifp, "never got down irq\n"); 3525 } 3526 mxge_free_mbufs(sc); 3527 3528 ifp->if_flags &= ~IFF_RUNNING; 3529 for (i = 0; i < sc->num_tx_rings; ++i) { 3530 mxge_tx_ring_t *tx = &sc->ss[i].tx; 3531 3532 ifsq_clr_oactive(tx->ifsq); 3533 ifsq_watchdog_stop(&tx->watchdog); 3534 } 3535 } 3536 3537 static void 3538 mxge_setup_cfg_space(mxge_softc_t *sc) 3539 { 3540 device_t dev = sc->dev; 3541 int reg; 3542 uint16_t lnk, pectl; 3543 3544 /* Find the PCIe link width and set max read request to 4KB */ 3545 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 3546 lnk = pci_read_config(dev, reg + 0x12, 2); 3547 sc->link_width = (lnk >> 4) & 0x3f; 3548 3549 if (sc->pectl == 0) { 3550 pectl = pci_read_config(dev, reg + 0x8, 2); 3551 pectl = (pectl & ~0x7000) | (5 << 12); 3552 pci_write_config(dev, reg + 0x8, pectl, 2); 3553 sc->pectl = pectl; 3554 } else { 3555 /* Restore saved pectl after watchdog reset */ 3556 pci_write_config(dev, reg + 0x8, sc->pectl, 2); 3557 } 3558 } 3559 3560 /* Enable DMA and memory space access */ 3561 pci_enable_busmaster(dev); 3562 } 3563 3564 static uint32_t 3565 mxge_read_reboot(mxge_softc_t *sc) 3566 { 3567 device_t dev = sc->dev; 3568 uint32_t vs; 3569 3570 /* Find the vendor specific offset */ 3571 if (pci_find_extcap(dev, PCIY_VENDOR, &vs) != 0) { 3572 if_printf(sc->ifp, "could not find vendor specific offset\n"); 3573 return (uint32_t)-1; 3574 } 3575 /* Enable read32 mode */ 3576 pci_write_config(dev, vs + 0x10, 0x3, 1); 3577 /* Tell NIC which register to read */ 3578 pci_write_config(dev, vs + 0x18, 0xfffffff0, 4); 3579 return pci_read_config(dev, vs + 0x14, 4); 3580 } 3581 3582 static void 3583 mxge_watchdog_reset(mxge_softc_t *sc) 3584 { 3585 struct pci_devinfo *dinfo; 3586 int err, running; 3587 uint32_t reboot; 3588 uint16_t cmd; 3589 3590 err = ENXIO; 3591 3592 if_printf(sc->ifp, "Watchdog reset!\n"); 3593 3594 /* 3595 * Check to see if the NIC rebooted. If it did, then all of 3596 * PCI config space has been reset, and things like the 3597 * busmaster bit will be zero. If this is the case, then we 3598 * must restore PCI config space before the NIC can be used 3599 * again 3600 */ 3601 cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); 3602 if (cmd == 0xffff) { 3603 /* 3604 * Maybe the watchdog caught the NIC rebooting; wait 3605 * up to 100ms for it to finish. If it does not come 3606 * back, then give up 3607 */ 3608 DELAY(1000*100); 3609 cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); 3610 if (cmd == 0xffff) 3611 if_printf(sc->ifp, "NIC disappeared!\n"); 3612 } 3613 if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) { 3614 /* Print the reboot status */ 3615 reboot = mxge_read_reboot(sc); 3616 if_printf(sc->ifp, "NIC rebooted, status = 0x%x\n", reboot); 3617 3618 running = sc->ifp->if_flags & IFF_RUNNING; 3619 if (running) { 3620 /* 3621 * Quiesce NIC so that TX routines will not try to 3622 * xmit after restoration of BAR 3623 */ 3624 3625 /* Mark the link as down */ 3626 if (sc->link_state) { 3627 sc->ifp->if_link_state = LINK_STATE_DOWN; 3628 if_link_state_change(sc->ifp); 3629 } 3630 mxge_close(sc, 1); 3631 } 3632 /* Restore PCI configuration space */ 3633 dinfo = device_get_ivars(sc->dev); 3634 pci_cfg_restore(sc->dev, dinfo); 3635 3636 /* And redo any changes we made to our config space */ 3637 mxge_setup_cfg_space(sc); 3638 3639 /* Reload f/w */ 3640 err = mxge_load_firmware(sc, 0); 3641 if (err) 3642 if_printf(sc->ifp, "Unable to re-load f/w\n"); 3643 if (running && !err) { 3644 int i; 3645 3646 err = mxge_open(sc); 3647 3648 for (i = 0; i < sc->num_tx_rings; ++i) 3649 ifsq_devstart_sched(sc->ss[i].tx.ifsq); 3650 } 3651 sc->watchdog_resets++; 3652 } else { 3653 if_printf(sc->ifp, "NIC did not reboot, not resetting\n"); 3654 err = 0; 3655 } 3656 if (err) { 3657 if_printf(sc->ifp, "watchdog reset failed\n"); 3658 } else { 3659 if (sc->dying == 2) 3660 sc->dying = 0; 3661 callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc); 3662 } 3663 } 3664 3665 static void 3666 mxge_warn_stuck(mxge_softc_t *sc, mxge_tx_ring_t *tx, int slice) 3667 { 3668 if_printf(sc->ifp, "slice %d struck? ring state:\n", slice); 3669 if_printf(sc->ifp, "tx.req=%d tx.done=%d, tx.queue_active=%d\n", 3670 tx->req, tx->done, tx->queue_active); 3671 if_printf(sc->ifp, "tx.activate=%d tx.deactivate=%d\n", 3672 tx->activate, tx->deactivate); 3673 if_printf(sc->ifp, "pkt_done=%d fw=%d\n", 3674 tx->pkt_done, be32toh(sc->ss->fw_stats->send_done_count)); 3675 } 3676 3677 static u_long 3678 mxge_update_stats(mxge_softc_t *sc) 3679 { 3680 u_long ipackets, opackets, pkts; 3681 3682 IFNET_STAT_GET(sc->ifp, ipackets, ipackets); 3683 IFNET_STAT_GET(sc->ifp, opackets, opackets); 3684 3685 pkts = ipackets - sc->ipackets; 3686 pkts += opackets - sc->opackets; 3687 3688 sc->ipackets = ipackets; 3689 sc->opackets = opackets; 3690 3691 return pkts; 3692 } 3693 3694 static void 3695 mxge_tick(void *arg) 3696 { 3697 mxge_softc_t *sc = arg; 3698 u_long pkts = 0; 3699 int err = 0; 3700 int ticks; 3701 3702 lwkt_serialize_enter(&sc->main_serialize); 3703 3704 ticks = mxge_ticks; 3705 if (sc->ifp->if_flags & IFF_RUNNING) { 3706 /* Aggregate stats from different slices */ 3707 pkts = mxge_update_stats(sc); 3708 if (sc->need_media_probe) 3709 mxge_media_probe(sc); 3710 } 3711 if (pkts == 0) { 3712 uint16_t cmd; 3713 3714 /* Ensure NIC did not suffer h/w fault while idle */ 3715 cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); 3716 if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) { 3717 sc->dying = 2; 3718 mxge_serialize_skipmain(sc); 3719 mxge_watchdog_reset(sc); 3720 mxge_deserialize_skipmain(sc); 3721 err = ENXIO; 3722 } 3723 3724 /* Look less often if NIC is idle */ 3725 ticks *= 4; 3726 } 3727 3728 if (err == 0) 3729 callout_reset(&sc->co_hdl, ticks, mxge_tick, sc); 3730 3731 lwkt_serialize_exit(&sc->main_serialize); 3732 } 3733 3734 static int 3735 mxge_media_change(struct ifnet *ifp) 3736 { 3737 return EINVAL; 3738 } 3739 3740 static int 3741 mxge_change_mtu(mxge_softc_t *sc, int mtu) 3742 { 3743 struct ifnet *ifp = sc->ifp; 3744 int real_mtu, old_mtu; 3745 int err = 0; 3746 3747 real_mtu = mtu + ETHER_HDR_LEN + EVL_ENCAPLEN; 3748 if (mtu > sc->max_mtu || real_mtu < 60) 3749 return EINVAL; 3750 3751 old_mtu = ifp->if_mtu; 3752 ifp->if_mtu = mtu; 3753 if (ifp->if_flags & IFF_RUNNING) { 3754 mxge_close(sc, 0); 3755 err = mxge_open(sc); 3756 if (err != 0) { 3757 ifp->if_mtu = old_mtu; 3758 mxge_close(sc, 0); 3759 mxge_open(sc); 3760 } 3761 } 3762 return err; 3763 } 3764 3765 static void 3766 mxge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 3767 { 3768 mxge_softc_t *sc = ifp->if_softc; 3769 3770 3771 if (sc == NULL) 3772 return; 3773 ifmr->ifm_status = IFM_AVALID; 3774 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 3775 ifmr->ifm_status |= sc->link_state ? IFM_ACTIVE : 0; 3776 ifmr->ifm_active |= sc->current_media; 3777 } 3778 3779 static int 3780 mxge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, 3781 struct ucred *cr __unused) 3782 { 3783 mxge_softc_t *sc = ifp->if_softc; 3784 struct ifreq *ifr = (struct ifreq *)data; 3785 int err, mask; 3786 3787 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3788 err = 0; 3789 3790 switch (command) { 3791 case SIOCSIFMTU: 3792 err = mxge_change_mtu(sc, ifr->ifr_mtu); 3793 break; 3794 3795 case SIOCSIFFLAGS: 3796 if (sc->dying) 3797 return EINVAL; 3798 3799 if (ifp->if_flags & IFF_UP) { 3800 if (!(ifp->if_flags & IFF_RUNNING)) { 3801 err = mxge_open(sc); 3802 } else { 3803 /* 3804 * Take care of PROMISC and ALLMULTI 3805 * flag changes 3806 */ 3807 mxge_change_promisc(sc, 3808 ifp->if_flags & IFF_PROMISC); 3809 mxge_set_multicast_list(sc); 3810 } 3811 } else { 3812 if (ifp->if_flags & IFF_RUNNING) 3813 mxge_close(sc, 0); 3814 } 3815 break; 3816 3817 case SIOCADDMULTI: 3818 case SIOCDELMULTI: 3819 mxge_set_multicast_list(sc); 3820 break; 3821 3822 case SIOCSIFCAP: 3823 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3824 if (mask & IFCAP_TXCSUM) { 3825 ifp->if_capenable ^= IFCAP_TXCSUM; 3826 if (ifp->if_capenable & IFCAP_TXCSUM) 3827 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 3828 else 3829 ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP); 3830 } 3831 if (mask & IFCAP_TSO) { 3832 ifp->if_capenable ^= IFCAP_TSO; 3833 if (ifp->if_capenable & IFCAP_TSO) 3834 ifp->if_hwassist |= CSUM_TSO; 3835 else 3836 ifp->if_hwassist &= ~CSUM_TSO; 3837 } 3838 if (mask & IFCAP_RXCSUM) 3839 ifp->if_capenable ^= IFCAP_RXCSUM; 3840 if (mask & IFCAP_VLAN_HWTAGGING) 3841 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3842 break; 3843 3844 case SIOCGIFMEDIA: 3845 mxge_media_probe(sc); 3846 err = ifmedia_ioctl(ifp, (struct ifreq *)data, 3847 &sc->media, command); 3848 break; 3849 3850 default: 3851 err = ether_ioctl(ifp, command, data); 3852 break; 3853 } 3854 return err; 3855 } 3856 3857 static void 3858 mxge_fetch_tunables(mxge_softc_t *sc) 3859 { 3860 sc->intr_coal_delay = mxge_intr_coal_delay; 3861 if (sc->intr_coal_delay < 0 || sc->intr_coal_delay > (10 * 1000)) 3862 sc->intr_coal_delay = MXGE_INTR_COAL_DELAY; 3863 3864 /* XXX */ 3865 if (mxge_ticks == 0) 3866 mxge_ticks = hz / 2; 3867 3868 sc->pause = mxge_flow_control; 3869 sc->use_rss = mxge_use_rss; 3870 3871 sc->throttle = mxge_throttle; 3872 if (sc->throttle && sc->throttle > MXGE_MAX_THROTTLE) 3873 sc->throttle = MXGE_MAX_THROTTLE; 3874 if (sc->throttle && sc->throttle < MXGE_MIN_THROTTLE) 3875 sc->throttle = MXGE_MIN_THROTTLE; 3876 } 3877 3878 static void 3879 mxge_free_slices(mxge_softc_t *sc) 3880 { 3881 struct mxge_slice_state *ss; 3882 int i; 3883 3884 if (sc->ss == NULL) 3885 return; 3886 3887 for (i = 0; i < sc->num_slices; i++) { 3888 ss = &sc->ss[i]; 3889 if (ss->fw_stats != NULL) { 3890 mxge_dma_free(&ss->fw_stats_dma); 3891 ss->fw_stats = NULL; 3892 } 3893 if (ss->rx_data.rx_done.entry != NULL) { 3894 mxge_dma_free(&ss->rx_done_dma); 3895 ss->rx_data.rx_done.entry = NULL; 3896 } 3897 } 3898 kfree(sc->ss, M_DEVBUF); 3899 sc->ss = NULL; 3900 } 3901 3902 static int 3903 mxge_alloc_slices(mxge_softc_t *sc) 3904 { 3905 mxge_cmd_t cmd; 3906 struct mxge_slice_state *ss; 3907 size_t bytes; 3908 int err, i, rx_ring_size; 3909 3910 err = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd); 3911 if (err != 0) { 3912 device_printf(sc->dev, "Cannot determine rx ring size\n"); 3913 return err; 3914 } 3915 rx_ring_size = cmd.data0; 3916 sc->rx_intr_slots = 2 * (rx_ring_size / sizeof (mcp_dma_addr_t)); 3917 3918 bytes = sizeof(*sc->ss) * sc->num_slices; 3919 sc->ss = kmalloc_cachealign(bytes, M_DEVBUF, M_WAITOK | M_ZERO); 3920 3921 for (i = 0; i < sc->num_slices; i++) { 3922 ss = &sc->ss[i]; 3923 3924 ss->sc = sc; 3925 3926 lwkt_serialize_init(&ss->rx_data.rx_serialize); 3927 lwkt_serialize_init(&ss->tx.tx_serialize); 3928 ss->intr_rid = -1; 3929 3930 /* 3931 * Allocate per-slice rx interrupt queue 3932 * XXX assume 4bytes mcp_slot 3933 */ 3934 bytes = sc->rx_intr_slots * sizeof(mcp_slot_t); 3935 err = mxge_dma_alloc(sc, &ss->rx_done_dma, bytes, 4096); 3936 if (err != 0) { 3937 device_printf(sc->dev, 3938 "alloc %d slice rx_done failed\n", i); 3939 return err; 3940 } 3941 ss->rx_data.rx_done.entry = ss->rx_done_dma.dmem_addr; 3942 3943 /* 3944 * Allocate the per-slice firmware stats 3945 */ 3946 bytes = sizeof(*ss->fw_stats); 3947 err = mxge_dma_alloc(sc, &ss->fw_stats_dma, 3948 sizeof(*ss->fw_stats), 64); 3949 if (err != 0) { 3950 device_printf(sc->dev, 3951 "alloc %d fw_stats failed\n", i); 3952 return err; 3953 } 3954 ss->fw_stats = ss->fw_stats_dma.dmem_addr; 3955 } 3956 return 0; 3957 } 3958 3959 static void 3960 mxge_slice_probe(mxge_softc_t *sc) 3961 { 3962 int status, max_intr_slots, max_slices, num_slices; 3963 int msix_cnt, msix_enable, i, multi_tx; 3964 mxge_cmd_t cmd; 3965 const char *old_fw; 3966 3967 sc->num_slices = 1; 3968 sc->num_tx_rings = 1; 3969 3970 num_slices = device_getenv_int(sc->dev, "num_slices", mxge_num_slices); 3971 if (num_slices == 1) 3972 return; 3973 3974 if (ncpus2 == 1) 3975 return; 3976 3977 msix_enable = device_getenv_int(sc->dev, "msix.enable", 3978 mxge_msix_enable); 3979 if (!msix_enable) 3980 return; 3981 3982 msix_cnt = pci_msix_count(sc->dev); 3983 if (msix_cnt < 2) 3984 return; 3985 3986 /* 3987 * Round down MSI-X vector count to the nearest power of 2 3988 */ 3989 i = 0; 3990 while ((1 << (i + 1)) <= msix_cnt) 3991 ++i; 3992 msix_cnt = 1 << i; 3993 3994 /* 3995 * Now load the slice aware firmware see what it supports 3996 */ 3997 old_fw = sc->fw_name; 3998 if (old_fw == mxge_fw_aligned) 3999 sc->fw_name = mxge_fw_rss_aligned; 4000 else 4001 sc->fw_name = mxge_fw_rss_unaligned; 4002 status = mxge_load_firmware(sc, 0); 4003 if (status != 0) { 4004 device_printf(sc->dev, "Falling back to a single slice\n"); 4005 return; 4006 } 4007 4008 /* 4009 * Try to send a reset command to the card to see if it is alive 4010 */ 4011 memset(&cmd, 0, sizeof(cmd)); 4012 status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd); 4013 if (status != 0) { 4014 device_printf(sc->dev, "failed reset\n"); 4015 goto abort_with_fw; 4016 } 4017 4018 /* 4019 * Get rx ring size to calculate rx interrupt queue size 4020 */ 4021 status = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd); 4022 if (status != 0) { 4023 device_printf(sc->dev, "Cannot determine rx ring size\n"); 4024 goto abort_with_fw; 4025 } 4026 max_intr_slots = 2 * (cmd.data0 / sizeof(mcp_dma_addr_t)); 4027 4028 /* 4029 * Tell it the size of the rx interrupt queue 4030 */ 4031 cmd.data0 = max_intr_slots * sizeof(struct mcp_slot); 4032 status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd); 4033 if (status != 0) { 4034 device_printf(sc->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n"); 4035 goto abort_with_fw; 4036 } 4037 4038 /* 4039 * Ask the maximum number of slices it supports 4040 */ 4041 status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd); 4042 if (status != 0) { 4043 device_printf(sc->dev, 4044 "failed MXGEFW_CMD_GET_MAX_RSS_QUEUES\n"); 4045 goto abort_with_fw; 4046 } 4047 max_slices = cmd.data0; 4048 4049 /* 4050 * Round down max slices count to the nearest power of 2 4051 */ 4052 i = 0; 4053 while ((1 << (i + 1)) <= max_slices) 4054 ++i; 4055 max_slices = 1 << i; 4056 4057 if (max_slices > msix_cnt) 4058 max_slices = msix_cnt; 4059 4060 sc->num_slices = num_slices; 4061 sc->num_slices = if_ring_count2(sc->num_slices, max_slices); 4062 4063 multi_tx = device_getenv_int(sc->dev, "multi_tx", mxge_multi_tx); 4064 if (multi_tx) 4065 sc->num_tx_rings = sc->num_slices; 4066 4067 if (bootverbose) { 4068 device_printf(sc->dev, "using %d slices, max %d\n", 4069 sc->num_slices, max_slices); 4070 } 4071 4072 if (sc->num_slices == 1) 4073 goto abort_with_fw; 4074 return; 4075 4076 abort_with_fw: 4077 sc->fw_name = old_fw; 4078 mxge_load_firmware(sc, 0); 4079 } 4080 4081 static void 4082 mxge_setup_serialize(struct mxge_softc *sc) 4083 { 4084 int i = 0, slice; 4085 4086 /* Main + rx + tx */ 4087 sc->nserialize = (2 * sc->num_slices) + 1; 4088 sc->serializes = 4089 kmalloc(sc->nserialize * sizeof(struct lwkt_serialize *), 4090 M_DEVBUF, M_WAITOK | M_ZERO); 4091 4092 /* 4093 * Setup serializes 4094 * 4095 * NOTE: Order is critical 4096 */ 4097 4098 KKASSERT(i < sc->nserialize); 4099 sc->serializes[i++] = &sc->main_serialize; 4100 4101 for (slice = 0; slice < sc->num_slices; ++slice) { 4102 KKASSERT(i < sc->nserialize); 4103 sc->serializes[i++] = &sc->ss[slice].rx_data.rx_serialize; 4104 } 4105 4106 for (slice = 0; slice < sc->num_slices; ++slice) { 4107 KKASSERT(i < sc->nserialize); 4108 sc->serializes[i++] = &sc->ss[slice].tx.tx_serialize; 4109 } 4110 4111 KKASSERT(i == sc->nserialize); 4112 } 4113 4114 static void 4115 mxge_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 4116 { 4117 struct mxge_softc *sc = ifp->if_softc; 4118 4119 ifnet_serialize_array_enter(sc->serializes, sc->nserialize, slz); 4120 } 4121 4122 static void 4123 mxge_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4124 { 4125 struct mxge_softc *sc = ifp->if_softc; 4126 4127 ifnet_serialize_array_exit(sc->serializes, sc->nserialize, slz); 4128 } 4129 4130 static int 4131 mxge_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4132 { 4133 struct mxge_softc *sc = ifp->if_softc; 4134 4135 return ifnet_serialize_array_try(sc->serializes, sc->nserialize, slz); 4136 } 4137 4138 #ifdef INVARIANTS 4139 4140 static void 4141 mxge_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 4142 boolean_t serialized) 4143 { 4144 struct mxge_softc *sc = ifp->if_softc; 4145 4146 ifnet_serialize_array_assert(sc->serializes, sc->nserialize, 4147 slz, serialized); 4148 } 4149 4150 #endif /* INVARIANTS */ 4151 4152 #ifdef IFPOLL_ENABLE 4153 4154 static void 4155 mxge_npoll_rx(struct ifnet *ifp, void *xss, int cycle) 4156 { 4157 struct mxge_slice_state *ss = xss; 4158 mxge_rx_done_t *rx_done = &ss->rx_data.rx_done; 4159 4160 ASSERT_SERIALIZED(&ss->rx_data.rx_serialize); 4161 4162 if (rx_done->entry[rx_done->idx].length != 0) { 4163 mxge_clean_rx_done(&ss->sc->arpcom.ac_if, &ss->rx_data, cycle); 4164 } else { 4165 /* 4166 * XXX 4167 * This register writting obviously has cost, 4168 * however, if we don't hand back the rx token, 4169 * the upcoming packets may suffer rediculously 4170 * large delay, as observed on 8AL-C using ping(8). 4171 */ 4172 *ss->irq_claim = be32toh(3); 4173 } 4174 } 4175 4176 static void 4177 mxge_npoll(struct ifnet *ifp, struct ifpoll_info *info) 4178 { 4179 struct mxge_softc *sc = ifp->if_softc; 4180 int i; 4181 4182 if (info == NULL) 4183 return; 4184 4185 /* 4186 * Only poll rx; polling tx and status don't seem to work 4187 */ 4188 for (i = 0; i < sc->num_slices; ++i) { 4189 struct mxge_slice_state *ss = &sc->ss[i]; 4190 int idx = ss->intr_cpuid; 4191 4192 KKASSERT(idx < ncpus2); 4193 info->ifpi_rx[idx].poll_func = mxge_npoll_rx; 4194 info->ifpi_rx[idx].arg = ss; 4195 info->ifpi_rx[idx].serializer = &ss->rx_data.rx_serialize; 4196 } 4197 } 4198 4199 #endif /* IFPOLL_ENABLE */ 4200 4201 static int 4202 mxge_attach(device_t dev) 4203 { 4204 mxge_softc_t *sc = device_get_softc(dev); 4205 struct ifnet *ifp = &sc->arpcom.ac_if; 4206 int err, rid, i; 4207 4208 /* 4209 * Avoid rewriting half the lines in this file to use 4210 * &sc->arpcom.ac_if instead 4211 */ 4212 sc->ifp = ifp; 4213 sc->dev = dev; 4214 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 4215 ifmedia_init(&sc->media, 0, mxge_media_change, mxge_media_status); 4216 4217 lwkt_serialize_init(&sc->main_serialize); 4218 4219 mxge_fetch_tunables(sc); 4220 4221 err = bus_dma_tag_create(NULL, /* parent */ 4222 1, /* alignment */ 4223 0, /* boundary */ 4224 BUS_SPACE_MAXADDR, /* low */ 4225 BUS_SPACE_MAXADDR, /* high */ 4226 NULL, NULL, /* filter */ 4227 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 4228 0, /* num segs */ 4229 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 4230 0, /* flags */ 4231 &sc->parent_dmat); /* tag */ 4232 if (err != 0) { 4233 device_printf(dev, "Err %d allocating parent dmat\n", err); 4234 goto failed; 4235 } 4236 4237 callout_init_mp(&sc->co_hdl); 4238 4239 mxge_setup_cfg_space(sc); 4240 4241 /* 4242 * Map the board into the kernel 4243 */ 4244 rid = PCIR_BARS; 4245 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 4246 &rid, RF_ACTIVE); 4247 if (sc->mem_res == NULL) { 4248 device_printf(dev, "could not map memory\n"); 4249 err = ENXIO; 4250 goto failed; 4251 } 4252 4253 sc->sram = rman_get_virtual(sc->mem_res); 4254 sc->sram_size = 2*1024*1024 - (2*(48*1024)+(32*1024)) - 0x100; 4255 if (sc->sram_size > rman_get_size(sc->mem_res)) { 4256 device_printf(dev, "impossible memory region size %ld\n", 4257 rman_get_size(sc->mem_res)); 4258 err = ENXIO; 4259 goto failed; 4260 } 4261 4262 /* 4263 * Make NULL terminated copy of the EEPROM strings section of 4264 * lanai SRAM 4265 */ 4266 bzero(sc->eeprom_strings, MXGE_EEPROM_STRINGS_SIZE); 4267 bus_space_read_region_1(rman_get_bustag(sc->mem_res), 4268 rman_get_bushandle(sc->mem_res), 4269 sc->sram_size - MXGE_EEPROM_STRINGS_SIZE, 4270 sc->eeprom_strings, MXGE_EEPROM_STRINGS_SIZE - 2); 4271 err = mxge_parse_strings(sc); 4272 if (err != 0) { 4273 device_printf(dev, "parse EEPROM string failed\n"); 4274 goto failed; 4275 } 4276 4277 /* 4278 * Enable write combining for efficient use of PCIe bus 4279 */ 4280 mxge_enable_wc(sc); 4281 4282 /* 4283 * Allocate the out of band DMA memory 4284 */ 4285 err = mxge_dma_alloc(sc, &sc->cmd_dma, sizeof(mxge_cmd_t), 64); 4286 if (err != 0) { 4287 device_printf(dev, "alloc cmd DMA buf failed\n"); 4288 goto failed; 4289 } 4290 sc->cmd = sc->cmd_dma.dmem_addr; 4291 4292 err = mxge_dma_alloc(sc, &sc->zeropad_dma, 64, 64); 4293 if (err != 0) { 4294 device_printf(dev, "alloc zeropad DMA buf failed\n"); 4295 goto failed; 4296 } 4297 4298 err = mxge_dma_alloc(sc, &sc->dmabench_dma, 4096, 4096); 4299 if (err != 0) { 4300 device_printf(dev, "alloc dmabench DMA buf failed\n"); 4301 goto failed; 4302 } 4303 4304 /* Select & load the firmware */ 4305 err = mxge_select_firmware(sc); 4306 if (err != 0) { 4307 device_printf(dev, "select firmware failed\n"); 4308 goto failed; 4309 } 4310 4311 mxge_slice_probe(sc); 4312 err = mxge_alloc_slices(sc); 4313 if (err != 0) { 4314 device_printf(dev, "alloc slices failed\n"); 4315 goto failed; 4316 } 4317 4318 err = mxge_alloc_intr(sc); 4319 if (err != 0) { 4320 device_printf(dev, "alloc intr failed\n"); 4321 goto failed; 4322 } 4323 4324 /* Setup serializes */ 4325 mxge_setup_serialize(sc); 4326 4327 err = mxge_reset(sc, 0); 4328 if (err != 0) { 4329 device_printf(dev, "reset failed\n"); 4330 goto failed; 4331 } 4332 4333 err = mxge_alloc_rings(sc); 4334 if (err != 0) { 4335 device_printf(dev, "failed to allocate rings\n"); 4336 goto failed; 4337 } 4338 4339 ifp->if_baudrate = IF_Gbps(10UL); 4340 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO; 4341 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; 4342 4343 ifp->if_capabilities |= IFCAP_VLAN_MTU; 4344 #if 0 4345 /* Well, its software, sigh */ 4346 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 4347 #endif 4348 ifp->if_capenable = ifp->if_capabilities; 4349 4350 ifp->if_softc = sc; 4351 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 4352 ifp->if_init = mxge_init; 4353 ifp->if_ioctl = mxge_ioctl; 4354 ifp->if_start = mxge_start; 4355 #ifdef IFPOLL_ENABLE 4356 if (sc->intr_type != PCI_INTR_TYPE_LEGACY) 4357 ifp->if_npoll = mxge_npoll; 4358 #endif 4359 ifp->if_serialize = mxge_serialize; 4360 ifp->if_deserialize = mxge_deserialize; 4361 ifp->if_tryserialize = mxge_tryserialize; 4362 #ifdef INVARIANTS 4363 ifp->if_serialize_assert = mxge_serialize_assert; 4364 #endif 4365 4366 /* Increase TSO burst length */ 4367 ifp->if_tsolen = (32 * ETHERMTU); 4368 4369 /* Initialise the ifmedia structure */ 4370 mxge_media_init(sc); 4371 mxge_media_probe(sc); 4372 4373 ether_ifattach(ifp, sc->mac_addr, NULL); 4374 4375 /* Setup TX rings and subqueues */ 4376 for (i = 0; i < sc->num_tx_rings; ++i) { 4377 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 4378 struct mxge_slice_state *ss = &sc->ss[i]; 4379 4380 ifsq_set_cpuid(ifsq, ss->intr_cpuid); 4381 ifsq_set_hw_serialize(ifsq, &ss->tx.tx_serialize); 4382 ifsq_set_priv(ifsq, &ss->tx); 4383 ss->tx.ifsq = ifsq; 4384 4385 ifsq_watchdog_init(&ss->tx.watchdog, ifsq, mxge_watchdog); 4386 } 4387 4388 /* 4389 * XXX 4390 * We are not ready to do "gather" jumbo frame, so 4391 * limit MTU to MJUMPAGESIZE 4392 */ 4393 sc->max_mtu = MJUMPAGESIZE - 4394 ETHER_HDR_LEN - EVL_ENCAPLEN - MXGEFW_PAD - 1; 4395 sc->dying = 0; 4396 4397 err = mxge_setup_intr(sc); 4398 if (err != 0) { 4399 device_printf(dev, "alloc and setup intr failed\n"); 4400 ether_ifdetach(ifp); 4401 goto failed; 4402 } 4403 4404 mxge_add_sysctls(sc); 4405 4406 callout_reset_bycpu(&sc->co_hdl, mxge_ticks, mxge_tick, sc, 4407 sc->ss[0].intr_cpuid); 4408 return 0; 4409 4410 failed: 4411 mxge_detach(dev); 4412 return err; 4413 } 4414 4415 static int 4416 mxge_detach(device_t dev) 4417 { 4418 mxge_softc_t *sc = device_get_softc(dev); 4419 4420 if (device_is_attached(dev)) { 4421 struct ifnet *ifp = sc->ifp; 4422 4423 ifnet_serialize_all(ifp); 4424 4425 sc->dying = 1; 4426 if (ifp->if_flags & IFF_RUNNING) 4427 mxge_close(sc, 1); 4428 callout_stop(&sc->co_hdl); 4429 4430 mxge_teardown_intr(sc, sc->num_slices); 4431 4432 ifnet_deserialize_all(ifp); 4433 4434 callout_terminate(&sc->co_hdl); 4435 4436 ether_ifdetach(ifp); 4437 } 4438 ifmedia_removeall(&sc->media); 4439 4440 if (sc->cmd != NULL && sc->zeropad_dma.dmem_addr != NULL && 4441 sc->sram != NULL) 4442 mxge_dummy_rdma(sc, 0); 4443 4444 mxge_free_intr(sc); 4445 mxge_rem_sysctls(sc); 4446 mxge_free_rings(sc); 4447 4448 /* MUST after sysctls, intr and rings are freed */ 4449 mxge_free_slices(sc); 4450 4451 if (sc->dmabench_dma.dmem_addr != NULL) 4452 mxge_dma_free(&sc->dmabench_dma); 4453 if (sc->zeropad_dma.dmem_addr != NULL) 4454 mxge_dma_free(&sc->zeropad_dma); 4455 if (sc->cmd_dma.dmem_addr != NULL) 4456 mxge_dma_free(&sc->cmd_dma); 4457 4458 if (sc->msix_table_res != NULL) { 4459 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2), 4460 sc->msix_table_res); 4461 } 4462 if (sc->mem_res != NULL) { 4463 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, 4464 sc->mem_res); 4465 } 4466 4467 if (sc->parent_dmat != NULL) 4468 bus_dma_tag_destroy(sc->parent_dmat); 4469 4470 return 0; 4471 } 4472 4473 static int 4474 mxge_shutdown(device_t dev) 4475 { 4476 return 0; 4477 } 4478 4479 static void 4480 mxge_free_msix(struct mxge_softc *sc, boolean_t setup) 4481 { 4482 int i; 4483 4484 KKASSERT(sc->num_slices > 1); 4485 4486 for (i = 0; i < sc->num_slices; ++i) { 4487 struct mxge_slice_state *ss = &sc->ss[i]; 4488 4489 if (ss->intr_res != NULL) { 4490 bus_release_resource(sc->dev, SYS_RES_IRQ, 4491 ss->intr_rid, ss->intr_res); 4492 } 4493 if (ss->intr_rid >= 0) 4494 pci_release_msix_vector(sc->dev, ss->intr_rid); 4495 } 4496 if (setup) 4497 pci_teardown_msix(sc->dev); 4498 } 4499 4500 static int 4501 mxge_alloc_msix(struct mxge_softc *sc) 4502 { 4503 struct mxge_slice_state *ss; 4504 int offset, rid, error, i; 4505 boolean_t setup = FALSE; 4506 4507 KKASSERT(sc->num_slices > 1); 4508 4509 if (sc->num_slices == ncpus2) { 4510 offset = 0; 4511 } else { 4512 int offset_def; 4513 4514 offset_def = (sc->num_slices * device_get_unit(sc->dev)) % 4515 ncpus2; 4516 4517 offset = device_getenv_int(sc->dev, "msix.offset", offset_def); 4518 if (offset >= ncpus2 || 4519 offset % sc->num_slices != 0) { 4520 device_printf(sc->dev, "invalid msix.offset %d, " 4521 "use %d\n", offset, offset_def); 4522 offset = offset_def; 4523 } 4524 } 4525 4526 ss = &sc->ss[0]; 4527 4528 ss->intr_serialize = &sc->main_serialize; 4529 ss->intr_func = mxge_msi; 4530 ksnprintf(ss->intr_desc0, sizeof(ss->intr_desc0), 4531 "%s comb", device_get_nameunit(sc->dev)); 4532 ss->intr_desc = ss->intr_desc0; 4533 ss->intr_cpuid = offset; 4534 4535 for (i = 1; i < sc->num_slices; ++i) { 4536 ss = &sc->ss[i]; 4537 4538 ss->intr_serialize = &ss->rx_data.rx_serialize; 4539 if (sc->num_tx_rings == 1) { 4540 ss->intr_func = mxge_msix_rx; 4541 ksnprintf(ss->intr_desc0, sizeof(ss->intr_desc0), 4542 "%s rx", device_get_nameunit(sc->dev)); 4543 } else { 4544 ss->intr_func = mxge_msix_rxtx; 4545 ksnprintf(ss->intr_desc0, sizeof(ss->intr_desc0), 4546 "%s rxtx", device_get_nameunit(sc->dev)); 4547 } 4548 ss->intr_desc = ss->intr_desc0; 4549 ss->intr_cpuid = offset + i; 4550 } 4551 4552 rid = PCIR_BAR(2); 4553 sc->msix_table_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 4554 &rid, RF_ACTIVE); 4555 if (sc->msix_table_res == NULL) { 4556 device_printf(sc->dev, "couldn't alloc MSI-X table res\n"); 4557 return ENXIO; 4558 } 4559 4560 error = pci_setup_msix(sc->dev); 4561 if (error) { 4562 device_printf(sc->dev, "could not setup MSI-X\n"); 4563 goto back; 4564 } 4565 setup = TRUE; 4566 4567 for (i = 0; i < sc->num_slices; ++i) { 4568 ss = &sc->ss[i]; 4569 4570 error = pci_alloc_msix_vector(sc->dev, i, &ss->intr_rid, 4571 ss->intr_cpuid); 4572 if (error) { 4573 device_printf(sc->dev, "could not alloc " 4574 "MSI-X %d on cpu%d\n", i, ss->intr_cpuid); 4575 goto back; 4576 } 4577 4578 ss->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4579 &ss->intr_rid, RF_ACTIVE); 4580 if (ss->intr_res == NULL) { 4581 device_printf(sc->dev, "could not alloc " 4582 "MSI-X %d resource\n", i); 4583 error = ENXIO; 4584 goto back; 4585 } 4586 } 4587 4588 pci_enable_msix(sc->dev); 4589 sc->intr_type = PCI_INTR_TYPE_MSIX; 4590 back: 4591 if (error) 4592 mxge_free_msix(sc, setup); 4593 return error; 4594 } 4595 4596 static int 4597 mxge_alloc_intr(struct mxge_softc *sc) 4598 { 4599 struct mxge_slice_state *ss; 4600 u_int irq_flags; 4601 4602 if (sc->num_slices > 1) { 4603 int error; 4604 4605 error = mxge_alloc_msix(sc); 4606 if (error) 4607 return error; 4608 KKASSERT(sc->intr_type == PCI_INTR_TYPE_MSIX); 4609 return 0; 4610 } 4611 4612 ss = &sc->ss[0]; 4613 4614 sc->intr_type = pci_alloc_1intr(sc->dev, mxge_msi_enable, 4615 &ss->intr_rid, &irq_flags); 4616 4617 ss->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4618 &ss->intr_rid, irq_flags); 4619 if (ss->intr_res == NULL) { 4620 device_printf(sc->dev, "could not alloc interrupt\n"); 4621 return ENXIO; 4622 } 4623 4624 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) 4625 ss->intr_func = mxge_legacy; 4626 else 4627 ss->intr_func = mxge_msi; 4628 ss->intr_serialize = &sc->main_serialize; 4629 ss->intr_cpuid = rman_get_cpuid(ss->intr_res); 4630 4631 return 0; 4632 } 4633 4634 static int 4635 mxge_setup_intr(struct mxge_softc *sc) 4636 { 4637 int i; 4638 4639 for (i = 0; i < sc->num_slices; ++i) { 4640 struct mxge_slice_state *ss = &sc->ss[i]; 4641 int error; 4642 4643 error = bus_setup_intr_descr(sc->dev, ss->intr_res, 4644 INTR_MPSAFE, ss->intr_func, ss, &ss->intr_hand, 4645 ss->intr_serialize, ss->intr_desc); 4646 if (error) { 4647 device_printf(sc->dev, "can't setup %dth intr\n", i); 4648 mxge_teardown_intr(sc, i); 4649 return error; 4650 } 4651 } 4652 return 0; 4653 } 4654 4655 static void 4656 mxge_teardown_intr(struct mxge_softc *sc, int cnt) 4657 { 4658 int i; 4659 4660 if (sc->ss == NULL) 4661 return; 4662 4663 for (i = 0; i < cnt; ++i) { 4664 struct mxge_slice_state *ss = &sc->ss[i]; 4665 4666 bus_teardown_intr(sc->dev, ss->intr_res, ss->intr_hand); 4667 } 4668 } 4669 4670 static void 4671 mxge_free_intr(struct mxge_softc *sc) 4672 { 4673 if (sc->ss == NULL) 4674 return; 4675 4676 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 4677 struct mxge_slice_state *ss = &sc->ss[0]; 4678 4679 if (ss->intr_res != NULL) { 4680 bus_release_resource(sc->dev, SYS_RES_IRQ, 4681 ss->intr_rid, ss->intr_res); 4682 } 4683 if (sc->intr_type == PCI_INTR_TYPE_MSI) 4684 pci_release_msi(sc->dev); 4685 } else { 4686 mxge_free_msix(sc, TRUE); 4687 } 4688 } 4689