1 /* 2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved. 3 * 4 * Copyright (c) 2001-2008, Intel Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * 34 * Copyright (c) 2005 The DragonFly Project. All rights reserved. 
35 * 36 * This code is derived from software contributed to The DragonFly Project 37 * by Matthew Dillon <dillon@backplane.com> 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in 47 * the documentation and/or other materials provided with the 48 * distribution. 49 * 3. Neither the name of The DragonFly Project nor the names of its 50 * contributors may be used to endorse or promote products derived 51 * from this software without specific, prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 
65 */ 66 67 #include "opt_polling.h" 68 #include "opt_serializer.h" 69 #include "opt_rss.h" 70 #include "opt_emx.h" 71 72 #include <sys/param.h> 73 #include <sys/bus.h> 74 #include <sys/endian.h> 75 #include <sys/interrupt.h> 76 #include <sys/kernel.h> 77 #include <sys/ktr.h> 78 #include <sys/malloc.h> 79 #include <sys/mbuf.h> 80 #include <sys/proc.h> 81 #include <sys/rman.h> 82 #include <sys/serialize.h> 83 #include <sys/socket.h> 84 #include <sys/sockio.h> 85 #include <sys/sysctl.h> 86 #include <sys/systm.h> 87 88 #include <net/bpf.h> 89 #include <net/ethernet.h> 90 #include <net/if.h> 91 #include <net/if_arp.h> 92 #include <net/if_dl.h> 93 #include <net/if_media.h> 94 #include <net/ifq_var.h> 95 #include <net/toeplitz.h> 96 #include <net/toeplitz2.h> 97 #include <net/vlan/if_vlan_var.h> 98 #include <net/vlan/if_vlan_ether.h> 99 100 #include <netinet/in_systm.h> 101 #include <netinet/in.h> 102 #include <netinet/ip.h> 103 #include <netinet/tcp.h> 104 #include <netinet/udp.h> 105 106 #include <bus/pci/pcivar.h> 107 #include <bus/pci/pcireg.h> 108 109 #include <dev/netif/ig_hal/e1000_api.h> 110 #include <dev/netif/ig_hal/e1000_82571.h> 111 #include <dev/netif/emx/if_emx.h> 112 113 #ifdef EMX_RSS_DEBUG 114 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \ 115 do { \ 116 if (sc->rss_debug >= lvl) \ 117 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 118 } while (0) 119 #else /* !EMX_RSS_DEBUG */ 120 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) 
((void)0)
#endif	/* EMX_RSS_DEBUG */

/* Prefix prepended to every probe description string below */
#define EMX_NAME	"Intel(R) PRO/1000 "

/*
 * Build one probe-table entry from a device-id suffix; the suffix is
 * token-pasted onto E1000_DEV_ID_ and also stringified for the description.
 */
#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

/*
 * PCI vendor/device ids supported by this driver; scanned linearly by
 * emx_probe().  Terminated by the all-zero EMX_DEVICE_NULL sentinel
 * (desc == NULL).
 */
static const struct emx_device {
	uint16_t	vid;	/* PCI vendor id */
	uint16_t	did;	/* PCI device id */
	const char	*desc;	/* human-readable description for probe */
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),

	/* required last entry */
	EMX_DEVICE_NULL
};

/* device(9) interface methods */
static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

/* ifnet entry points and periodic callout */
static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *);
#ifdef DEVICE_POLLING
static void	emx_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	emx_watchdog(struct ifnet *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);

/* Interrupt handler and RX/TX completion processing */
static void	emx_intr(void *);
static void	emx_rxeof(struct emx_softc *, int, int);
static void	emx_txeof(struct emx_softc *);
/* TX reclaim helpers and interrupt mask control */
static void	emx_tx_collect(struct emx_softc *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

/* Descriptor ring setup/teardown and per-packet TX encapsulation */
static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_softc *);
static int	emx_init_rx_ring(struct emx_softc *, struct emx_rxdata *);
static void	emx_free_rx_ring(struct emx_softc *, struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_softc *);
static int	emx_create_rx_ring(struct emx_softc *, struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_softc *, int);
static void	emx_destroy_rx_ring(struct emx_softc *,
		    struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_softc *, struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_softc *, struct mbuf **);
static int	emx_txcsum_pullup(struct emx_softc *, struct mbuf **);
static int	emx_txcsum(struct emx_softc *, struct mbuf *,
		    uint32_t *, uint32_t *);

/* Hardware bring-up, link state and address-filter management */
static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_hw_init(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);

/* Debug/diagnostic dumps */
static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

/* sysctl(9) handlers */
static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

/* device(9) method dispatch table */
static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	{ 0, 0 }
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
/* Depends on the shared Intel gigabit HAL (ig_hal) module */
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, 0, 0);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;	/* RX descriptors per ring */
static int	emx_txd = EMX_DEFAULT_TXD;	/* TX descriptors per ring */
static int	emx_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = FALSE;

static int	emx_82573_workaround = TRUE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;
284 285 #if !defined(KTR_IF_EMX) 286 #define KTR_IF_EMX KTR_ALL 287 #endif 288 KTR_INFO_MASTER(if_emx); 289 KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin", 0); 290 KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end", 0); 291 KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet", 0); 292 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet", 0); 293 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean", 0); 294 #define logif(name) KTR_LOG(if_emx_ ## name) 295 296 static __inline void 297 emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf) 298 { 299 rxd->rxd_bufaddr = htole64(rxbuf->paddr); 300 /* DD bit must be cleared */ 301 rxd->rxd_staterr = 0; 302 } 303 304 static __inline void 305 emx_rxcsum(uint32_t staterr, struct mbuf *mp) 306 { 307 /* Ignore Checksum bit is set */ 308 if (staterr & E1000_RXD_STAT_IXSM) 309 return; 310 311 if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == 312 E1000_RXD_STAT_IPCS) 313 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; 314 315 if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 316 E1000_RXD_STAT_TCPCS) { 317 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 318 CSUM_PSEUDO_HDR | 319 CSUM_FRAG_NOT_CHECKED; 320 mp->m_pkthdr.csum_data = htons(0xffff); 321 } 322 } 323 324 static __inline struct pktinfo * 325 emx_rssinfo(struct mbuf *m, struct pktinfo *pi, 326 uint32_t mrq, uint32_t hash, uint32_t staterr) 327 { 328 switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) { 329 case EMX_RXDMRQ_IPV4_TCP: 330 pi->pi_netisr = NETISR_IP; 331 pi->pi_flags = 0; 332 pi->pi_l3proto = IPPROTO_TCP; 333 break; 334 335 case EMX_RXDMRQ_IPV6_TCP: 336 pi->pi_netisr = NETISR_IPV6; 337 pi->pi_flags = 0; 338 pi->pi_l3proto = IPPROTO_TCP; 339 break; 340 341 case EMX_RXDMRQ_IPV4: 342 if (staterr & E1000_RXD_STAT_IXSM) 343 return NULL; 344 345 if ((staterr & 346 (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 347 E1000_RXD_STAT_TCPCS) { 348 pi->pi_netisr = NETISR_IP; 349 pi->pi_flags = 0; 
350 pi->pi_l3proto = IPPROTO_UDP; 351 break; 352 } 353 /* FALL THROUGH */ 354 default: 355 return NULL; 356 } 357 358 m->m_flags |= M_HASH; 359 m->m_pkthdr.hash = toeplitz_hash(hash); 360 return pi; 361 } 362 363 static int 364 emx_probe(device_t dev) 365 { 366 const struct emx_device *d; 367 uint16_t vid, did; 368 369 vid = pci_get_vendor(dev); 370 did = pci_get_device(dev); 371 372 for (d = emx_devices; d->desc != NULL; ++d) { 373 if (vid == d->vid && did == d->did) { 374 device_set_desc(dev, d->desc); 375 device_set_async_attach(dev, TRUE); 376 return 0; 377 } 378 } 379 return ENXIO; 380 } 381 382 static int 383 emx_attach(device_t dev) 384 { 385 struct emx_softc *sc = device_get_softc(dev); 386 struct ifnet *ifp = &sc->arpcom.ac_if; 387 int error = 0; 388 uint16_t eeprom_data, device_id; 389 390 callout_init(&sc->timer); 391 392 sc->dev = sc->osdep.dev = dev; 393 394 /* 395 * Determine hardware and mac type 396 */ 397 sc->hw.vendor_id = pci_get_vendor(dev); 398 sc->hw.device_id = pci_get_device(dev); 399 sc->hw.revision_id = pci_get_revid(dev); 400 sc->hw.subsystem_vendor_id = pci_get_subvendor(dev); 401 sc->hw.subsystem_device_id = pci_get_subdevice(dev); 402 403 if (e1000_set_mac_type(&sc->hw)) 404 return ENXIO; 405 406 /* Enable bus mastering */ 407 pci_enable_busmaster(dev); 408 409 /* 410 * Allocate IO memory 411 */ 412 sc->memory_rid = EMX_BAR_MEM; 413 sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 414 &sc->memory_rid, RF_ACTIVE); 415 if (sc->memory == NULL) { 416 device_printf(dev, "Unable to allocate bus resource: memory\n"); 417 error = ENXIO; 418 goto fail; 419 } 420 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory); 421 sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory); 422 423 /* XXX This is quite goofy, it is not actually used */ 424 sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle; 425 426 /* 427 * Allocate interrupt 428 */ 429 sc->intr_rid = 0; 430 sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 
&sc->intr_rid, 431 RF_SHAREABLE | RF_ACTIVE); 432 if (sc->intr_res == NULL) { 433 device_printf(dev, "Unable to allocate bus resource: " 434 "interrupt\n"); 435 error = ENXIO; 436 goto fail; 437 } 438 439 /* Save PCI command register for Shared Code */ 440 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 441 sc->hw.back = &sc->osdep; 442 443 /* Do Shared Code initialization */ 444 if (e1000_setup_init_funcs(&sc->hw, TRUE)) { 445 device_printf(dev, "Setup of Shared code failed\n"); 446 error = ENXIO; 447 goto fail; 448 } 449 e1000_get_bus_info(&sc->hw); 450 451 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 452 sc->hw.phy.autoneg_wait_to_complete = FALSE; 453 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT; 454 455 /* 456 * Interrupt throttle rate 457 */ 458 if (emx_int_throttle_ceil == 0) { 459 sc->int_throttle_ceil = 0; 460 } else { 461 int throttle = emx_int_throttle_ceil; 462 463 if (throttle < 0) 464 throttle = EMX_DEFAULT_ITR; 465 466 /* Recalculate the tunable value to get the exact frequency. */ 467 throttle = 1000000000 / 256 / throttle; 468 469 /* Upper 16bits of ITR is reserved and should be zero */ 470 if (throttle & 0xffff0000) 471 throttle = 1000000000 / 256 / EMX_DEFAULT_ITR; 472 473 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 474 } 475 476 e1000_init_script_state_82541(&sc->hw, TRUE); 477 e1000_set_tbi_compatibility_82543(&sc->hw, TRUE); 478 479 /* Copper options */ 480 if (sc->hw.phy.media_type == e1000_media_type_copper) { 481 sc->hw.phy.mdix = EMX_AUTO_ALL_MODES; 482 sc->hw.phy.disable_polarity_correction = FALSE; 483 sc->hw.phy.ms_type = EMX_MASTER_SLAVE; 484 } 485 486 /* Set the frame limits assuming standard ethernet sized frames. */ 487 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 488 sc->min_frame_size = ETHER_MIN_LEN; 489 490 /* This controls when hardware reports transmit completion status. 
*/ 491 sc->hw.mac.report_tx_early = 1; 492 493 #ifdef RSS 494 /* Calculate # of RX rings */ 495 if (ncpus > 1) 496 sc->rx_ring_cnt = EMX_NRX_RING; 497 else 498 #endif 499 sc->rx_ring_cnt = 1; 500 sc->rx_ring_inuse = sc->rx_ring_cnt; 501 502 /* Allocate RX/TX rings' busdma(9) stuffs */ 503 error = emx_dma_alloc(sc); 504 if (error) 505 goto fail; 506 507 /* Make sure we have a good EEPROM before we read from it */ 508 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 509 /* 510 * Some PCI-E parts fail the first check due to 511 * the link being in sleep state, call it again, 512 * if it fails a second time its a real issue. 513 */ 514 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 515 device_printf(dev, 516 "The EEPROM Checksum Is Not Valid\n"); 517 error = EIO; 518 goto fail; 519 } 520 } 521 522 /* Initialize the hardware */ 523 error = emx_hw_init(sc); 524 if (error) { 525 device_printf(dev, "Unable to initialize the hardware\n"); 526 goto fail; 527 } 528 529 /* Copy the permanent MAC address out of the EEPROM */ 530 if (e1000_read_mac_addr(&sc->hw) < 0) { 531 device_printf(dev, "EEPROM read error while reading MAC" 532 " address\n"); 533 error = EIO; 534 goto fail; 535 } 536 if (!emx_is_valid_eaddr(sc->hw.mac.addr)) { 537 device_printf(dev, "Invalid MAC address\n"); 538 error = EIO; 539 goto fail; 540 } 541 542 /* Manually turn off all interrupts */ 543 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 544 545 /* Setup OS specific network interface */ 546 emx_setup_ifp(sc); 547 548 /* Add sysctl tree, must after emx_setup_ifp() */ 549 emx_add_sysctl(sc); 550 551 /* Initialize statistics */ 552 emx_update_stats(sc); 553 554 sc->hw.mac.get_link_status = 1; 555 emx_update_link_status(sc); 556 557 /* Indicate SOL/IDER usage */ 558 if (e1000_check_reset_block(&sc->hw)) { 559 device_printf(dev, 560 "PHY reset is blocked due to SOL/IDER session.\n"); 561 } 562 563 /* Determine if we have to control management hardware */ 564 sc->has_manage = 
e1000_enable_mng_pass_thru(&sc->hw); 565 566 /* 567 * Setup Wake-on-Lan 568 */ 569 switch (sc->hw.mac.type) { 570 case e1000_82571: 571 case e1000_80003es2lan: 572 if (sc->hw.bus.func == 1) { 573 e1000_read_nvm(&sc->hw, 574 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 575 } else { 576 e1000_read_nvm(&sc->hw, 577 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 578 } 579 eeprom_data &= EMX_EEPROM_APME; 580 break; 581 582 default: 583 /* APME bit in EEPROM is mapped to WUC.APME */ 584 eeprom_data = 585 E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME; 586 break; 587 } 588 if (eeprom_data) 589 sc->wol = E1000_WUFC_MAG; 590 /* 591 * We have the eeprom settings, now apply the special cases 592 * where the eeprom may be wrong or the board won't support 593 * wake on lan on a particular port 594 */ 595 device_id = pci_get_device(dev); 596 switch (device_id) { 597 case E1000_DEV_ID_82571EB_FIBER: 598 /* 599 * Wake events only supported on port A for dual fiber 600 * regardless of eeprom setting 601 */ 602 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & 603 E1000_STATUS_FUNC_1) 604 sc->wol = 0; 605 break; 606 607 case E1000_DEV_ID_82571EB_QUAD_COPPER: 608 case E1000_DEV_ID_82571EB_QUAD_FIBER: 609 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: 610 /* if quad port sc, disable WoL on all but port A */ 611 if (emx_global_quad_port_a != 0) 612 sc->wol = 0; 613 /* Reset for multiple quad port adapters */ 614 if (++emx_global_quad_port_a == 4) 615 emx_global_quad_port_a = 0; 616 break; 617 } 618 619 /* XXX disable wol */ 620 sc->wol = 0; 621 622 sc->spare_tx_desc = EMX_TX_SPARE; 623 624 /* 625 * Keep following relationship between spare_tx_desc, oact_tx_desc 626 * and tx_int_nsegs: 627 * (spare_tx_desc + EMX_TX_RESERVED) <= 628 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_int_nsegs 629 */ 630 sc->oact_tx_desc = sc->num_tx_desc / 8; 631 if (sc->oact_tx_desc > EMX_TX_OACTIVE_MAX) 632 sc->oact_tx_desc = EMX_TX_OACTIVE_MAX; 633 if (sc->oact_tx_desc < sc->spare_tx_desc + EMX_TX_RESERVED) 634 
sc->oact_tx_desc = sc->spare_tx_desc + EMX_TX_RESERVED; 635 636 sc->tx_int_nsegs = sc->num_tx_desc / 16; 637 if (sc->tx_int_nsegs < sc->oact_tx_desc) 638 sc->tx_int_nsegs = sc->oact_tx_desc; 639 640 error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, emx_intr, sc, 641 &sc->intr_tag, ifp->if_serializer); 642 if (error) { 643 device_printf(dev, "Failed to register interrupt handler"); 644 ether_ifdetach(&sc->arpcom.ac_if); 645 goto fail; 646 } 647 648 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->intr_res)); 649 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 650 return (0); 651 fail: 652 emx_detach(dev); 653 return (error); 654 } 655 656 static int 657 emx_detach(device_t dev) 658 { 659 struct emx_softc *sc = device_get_softc(dev); 660 661 if (device_is_attached(dev)) { 662 struct ifnet *ifp = &sc->arpcom.ac_if; 663 664 lwkt_serialize_enter(ifp->if_serializer); 665 666 emx_stop(sc); 667 668 e1000_phy_hw_reset(&sc->hw); 669 670 emx_rel_mgmt(sc); 671 672 if (sc->hw.mac.type == e1000_82573 && 673 e1000_check_mng_mode(&sc->hw)) 674 emx_rel_hw_control(sc); 675 676 if (sc->wol) { 677 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 678 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 679 emx_enable_wol(dev); 680 } 681 682 bus_teardown_intr(dev, sc->intr_res, sc->intr_tag); 683 684 lwkt_serialize_exit(ifp->if_serializer); 685 686 ether_ifdetach(ifp); 687 } 688 bus_generic_detach(dev); 689 690 if (sc->intr_res != NULL) { 691 bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, 692 sc->intr_res); 693 } 694 695 if (sc->memory != NULL) { 696 bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid, 697 sc->memory); 698 } 699 700 emx_dma_free(sc); 701 702 /* Free sysctl tree */ 703 if (sc->sysctl_tree != NULL) 704 sysctl_ctx_free(&sc->sysctl_ctx); 705 706 return (0); 707 } 708 709 static int 710 emx_shutdown(device_t dev) 711 { 712 return emx_suspend(dev); 713 } 714 715 static int 716 emx_suspend(device_t dev) 717 { 718 struct emx_softc *sc = 
device_get_softc(dev); 719 struct ifnet *ifp = &sc->arpcom.ac_if; 720 721 lwkt_serialize_enter(ifp->if_serializer); 722 723 emx_stop(sc); 724 725 emx_rel_mgmt(sc); 726 727 if (sc->hw.mac.type == e1000_82573 && 728 e1000_check_mng_mode(&sc->hw)) 729 emx_rel_hw_control(sc); 730 731 if (sc->wol) { 732 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 733 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 734 emx_enable_wol(dev); 735 } 736 737 lwkt_serialize_exit(ifp->if_serializer); 738 739 return bus_generic_suspend(dev); 740 } 741 742 static int 743 emx_resume(device_t dev) 744 { 745 struct emx_softc *sc = device_get_softc(dev); 746 struct ifnet *ifp = &sc->arpcom.ac_if; 747 748 lwkt_serialize_enter(ifp->if_serializer); 749 750 emx_init(sc); 751 emx_get_mgmt(sc); 752 if_devstart(ifp); 753 754 lwkt_serialize_exit(ifp->if_serializer); 755 756 return bus_generic_resume(dev); 757 } 758 759 static void 760 emx_start(struct ifnet *ifp) 761 { 762 struct emx_softc *sc = ifp->if_softc; 763 struct mbuf *m_head; 764 765 ASSERT_SERIALIZED(ifp->if_serializer); 766 767 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 768 return; 769 770 if (!sc->link_active) { 771 ifq_purge(&ifp->if_snd); 772 return; 773 } 774 775 while (!ifq_is_empty(&ifp->if_snd)) { 776 /* Now do we at least have a minimal? */ 777 if (EMX_IS_OACTIVE(sc)) { 778 emx_tx_collect(sc); 779 if (EMX_IS_OACTIVE(sc)) { 780 ifp->if_flags |= IFF_OACTIVE; 781 sc->no_tx_desc_avail1++; 782 break; 783 } 784 } 785 786 logif(pkt_txqueue); 787 m_head = ifq_dequeue(&ifp->if_snd, NULL); 788 if (m_head == NULL) 789 break; 790 791 if (emx_encap(sc, &m_head)) { 792 ifp->if_oerrors++; 793 emx_tx_collect(sc); 794 continue; 795 } 796 797 /* Send a copy of the frame to the BPF listener */ 798 ETHER_BPF_MTAP(ifp, m_head); 799 800 /* Set timeout in case hardware has problems transmitting. 
*/ 801 ifp->if_timer = EMX_TX_TIMEOUT; 802 } 803 } 804 805 static int 806 emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 807 { 808 struct emx_softc *sc = ifp->if_softc; 809 struct ifreq *ifr = (struct ifreq *)data; 810 uint16_t eeprom_data = 0; 811 int max_frame_size, mask, reinit; 812 int error = 0; 813 814 ASSERT_SERIALIZED(ifp->if_serializer); 815 816 switch (command) { 817 case SIOCSIFMTU: 818 switch (sc->hw.mac.type) { 819 case e1000_82573: 820 /* 821 * 82573 only supports jumbo frames 822 * if ASPM is disabled. 823 */ 824 e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1, 825 &eeprom_data); 826 if (eeprom_data & NVM_WORD1A_ASPM_MASK) { 827 max_frame_size = ETHER_MAX_LEN; 828 break; 829 } 830 /* FALL THROUGH */ 831 832 /* Limit Jumbo Frame size */ 833 case e1000_82571: 834 case e1000_82572: 835 case e1000_82574: 836 case e1000_80003es2lan: 837 max_frame_size = 9234; 838 break; 839 840 default: 841 max_frame_size = MAX_JUMBO_FRAME_SIZE; 842 break; 843 } 844 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 845 ETHER_CRC_LEN) { 846 error = EINVAL; 847 break; 848 } 849 850 ifp->if_mtu = ifr->ifr_mtu; 851 sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + 852 ETHER_CRC_LEN; 853 854 if (ifp->if_flags & IFF_RUNNING) 855 emx_init(sc); 856 break; 857 858 case SIOCSIFFLAGS: 859 if (ifp->if_flags & IFF_UP) { 860 if ((ifp->if_flags & IFF_RUNNING)) { 861 if ((ifp->if_flags ^ sc->if_flags) & 862 (IFF_PROMISC | IFF_ALLMULTI)) { 863 emx_disable_promisc(sc); 864 emx_set_promisc(sc); 865 } 866 } else { 867 emx_init(sc); 868 } 869 } else if (ifp->if_flags & IFF_RUNNING) { 870 emx_stop(sc); 871 } 872 sc->if_flags = ifp->if_flags; 873 break; 874 875 case SIOCADDMULTI: 876 case SIOCDELMULTI: 877 if (ifp->if_flags & IFF_RUNNING) { 878 emx_disable_intr(sc); 879 emx_set_multi(sc); 880 #ifdef DEVICE_POLLING 881 if (!(ifp->if_flags & IFF_POLLING)) 882 #endif 883 emx_enable_intr(sc); 884 } 885 break; 886 887 case SIOCSIFMEDIA: 888 /* Check SOL/IDER usage */ 889 
if (e1000_check_reset_block(&sc->hw)) { 890 device_printf(sc->dev, "Media change is" 891 " blocked due to SOL/IDER session.\n"); 892 break; 893 } 894 /* FALL THROUGH */ 895 896 case SIOCGIFMEDIA: 897 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 898 break; 899 900 case SIOCSIFCAP: 901 reinit = 0; 902 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 903 if (mask & IFCAP_HWCSUM) { 904 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 905 reinit = 1; 906 } 907 if (mask & IFCAP_VLAN_HWTAGGING) { 908 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 909 reinit = 1; 910 } 911 if (mask & IFCAP_RSS) { 912 ifp->if_capenable ^= IFCAP_RSS; 913 reinit = 1; 914 } 915 if (reinit && (ifp->if_flags & IFF_RUNNING)) 916 emx_init(sc); 917 break; 918 919 default: 920 error = ether_ioctl(ifp, command, data); 921 break; 922 } 923 return (error); 924 } 925 926 static void 927 emx_watchdog(struct ifnet *ifp) 928 { 929 struct emx_softc *sc = ifp->if_softc; 930 931 ASSERT_SERIALIZED(ifp->if_serializer); 932 933 /* 934 * The timer is set to 5 every time start queues a packet. 935 * Then txeof keeps resetting it as long as it cleans at 936 * least one descriptor. 937 * Finally, anytime all descriptors are clean the timer is 938 * set to 0. 939 */ 940 941 if (E1000_READ_REG(&sc->hw, E1000_TDT(0)) == 942 E1000_READ_REG(&sc->hw, E1000_TDH(0))) { 943 /* 944 * If we reach here, all TX jobs are completed and 945 * the TX engine should have been idled for some time. 946 * We don't need to call if_devstart() here. 947 */ 948 ifp->if_flags &= ~IFF_OACTIVE; 949 ifp->if_timer = 0; 950 return; 951 } 952 953 /* 954 * If we are in this routine because of pause frames, then 955 * don't reset the hardware. 
 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EMX_TX_TIMEOUT;
		return;
	}

	if (e1000_check_for_link(&sc->hw) == 0)
		if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_oerrors++;
	sc->watchdog_events++;

	emx_init(sc);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Initialize the hardware and bring the interface up.  Registered as
 * if_init; also called directly whenever a configuration change needs
 * a full reinit.  Must be called with the serializer held.
 */
static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t pba;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	emx_stop(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (sc->hw.mac.type) {
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;

	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;

	case e1000_82574:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;

	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (sc->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571 sc, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_hw_init(sc)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Set hardware offload abilities */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = EMX_CSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	/* Prepare transmit descriptors and buffers */
	emx_init_tx_ring(sc);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/*
	 * Adjust # of RX ring to be used based on IFCAP_RSS
	 */
	if (ifp->if_capenable & IFCAP_RSS)
		sc->rx_ring_inuse = sc->rx_ring_cnt;
	else
		sc->rx_ring_inuse = 1;

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		if (emx_init_rx_ring(sc, &sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->timer, hz, emx_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		emx_disable_intr(sc);
	else
#endif /* DEVICE_POLLING */
		emx_enable_intr(sc);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy.reset_disable = TRUE;
}

#ifdef DEVICE_POLLING

/*
 * DEVICE_POLLING hook.  REGISTER/DEREGISTER toggle the interrupt
 * mask; POLL_AND_CHECK_STATUS additionally samples ICR for link
 * events before the normal RX/TX processing shared with POLL_ONLY.
 */
static void
emx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		emx_disable_intr(sc);
		break;

	case POLL_DEREGISTER:
		emx_enable_intr(sc);
		break;

	case POLL_AND_CHECK_STATUS:
		reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&sc->timer);
			sc->hw.mac.get_link_status = 1;
			emx_update_link_status(sc);
			callout_reset(&sc->timer, hz, emx_timer, sc);
		}
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			int i;

			for (i = 0; i < sc->rx_ring_inuse; ++i)
				emx_rxeof(sc, i, count);

			emx_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
		break;
	}
}

#endif /* DEVICE_POLLING */

/*
 * Interrupt handler; the interrupt dispatch holds the serializer.
 */
static void
emx_intr(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp =
	    &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(ifp->if_serializer);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle. The ICR register
	 * reports all-ones value in this case. Processing such
	 * interrupts would lead to a freeze. I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_inuse; ++i)
				emx_rxeof(sc, i, -1);
		}
		if (reg_icr & E1000_ICR_TXDW) {
			emx_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

/*
 * ifmedia status callback: report current link state, media type,
 * speed and duplex to the caller's ifmediareq.
 */
static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;

		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

/*
 * ifmedia change callback: program autonegotiation or forced
 * speed/duplex from the selected media word, then reinit.
 */
static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	/*
	 * As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	sc->hw.phy.reset_disable = FALSE;

	emx_init(sc);

	return (0);
}

/*
 * Map an mbuf chain into TX descriptors.  May allocate one extra
 * context descriptor for checksum offload and may defragment the
 * chain; on failure the chain is freed and *m_headp set to NULL.
 */
static int
emx_encap(struct emx_softc *sc, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_len < EMX_TXCSUM_MINHL &&
	    (m_head->m_flags & EMX_CSUM_FEATURES)) {
		/*
		 * Make sure that ethernet header and ip.ip_hl are in
		 * contiguous memory, since if TXCSUM is enabled, later
		 * TX context descriptor's setup need to access ip.ip_hl.
		 */
		error = emx_txcsum_pullup(sc, m_headp);
		if (error) {
			KKASSERT(*m_headp == NULL);
			return error;
		}
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index, this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = sc->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= sc->spare_tx_desc, ("not enough spare TX desc\n"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(sc->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			sc->mbuf_alloc_failed++;
		else
			sc->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(sc->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	sc->tx_nsegs += nsegs;

	if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		sc->tx_nsegs += emx_txcsum(sc, m_head, &txd_upper, &txd_lower);
	}
	i = sc->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &sc->tx_buf[i];
		ctxd = &sc->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == sc->num_tx_desc)
			i = 0;
	}

	sc->next_avail_tx_desc = i;

	KKASSERT(sc->num_tx_desc_avail > nsegs);
	sc->num_tx_desc_avail -= nsegs;

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vlantag);

		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (sc->tx_nsegs >= sc->tx_int_nsegs) {
		sc->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_int_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		sc->tx_dd[sc->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(sc->tx_dd_tail);
		KKASSERT(sc->tx_dd_tail != sc->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells
	 * the E1000 that this frame is available to transmit.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_TDT(0), i);

	return (0);
}

/*
 * Program RCTL for the current promiscuous/allmulti interface flags.
 */
static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}

/*
 * Clear all promiscuous-related bits from RCTL.
 */
static void
emx_disable_promisc(struct emx_softc *sc)
{
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	reg_rctl &= ~E1000_RCTL_UPE;
	reg_rctl &= ~E1000_RCTL_MPE;
	reg_rctl &= ~E1000_RCTL_SBP;
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

/*
 * Push the interface's multicast filter down to the hardware;
 * falls back to MPE (accept all multicast) when there are more
 * addresses than the hardware filter can hold.
 */
static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t mta[512]; /* Largest MTS is 4096 bits */
	int mcnt = 0;

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta,
		    mcnt, 1, sc->hw.mac.rar_entry_count);
	}
}

/*
 * This routine checks for link status and updates statistics.
1533 */ 1534 static void 1535 emx_timer(void *xsc) 1536 { 1537 struct emx_softc *sc = xsc; 1538 struct ifnet *ifp = &sc->arpcom.ac_if; 1539 1540 lwkt_serialize_enter(ifp->if_serializer); 1541 1542 emx_update_link_status(sc); 1543 emx_update_stats(sc); 1544 1545 /* Reset LAA into RAR[0] on 82571 */ 1546 if (e1000_get_laa_state_82571(&sc->hw) == TRUE) 1547 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1548 1549 if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 1550 emx_print_hw_stats(sc); 1551 1552 emx_smartspeed(sc); 1553 1554 callout_reset(&sc->timer, hz, emx_timer, sc); 1555 1556 lwkt_serialize_exit(ifp->if_serializer); 1557 } 1558 1559 static void 1560 emx_update_link_status(struct emx_softc *sc) 1561 { 1562 struct e1000_hw *hw = &sc->hw; 1563 struct ifnet *ifp = &sc->arpcom.ac_if; 1564 device_t dev = sc->dev; 1565 uint32_t link_check = 0; 1566 1567 /* Get the cached link value or read phy for real */ 1568 switch (hw->phy.media_type) { 1569 case e1000_media_type_copper: 1570 if (hw->mac.get_link_status) { 1571 /* Do the work to read phy */ 1572 e1000_check_for_link(hw); 1573 link_check = !hw->mac.get_link_status; 1574 if (link_check) /* ESB2 fix */ 1575 e1000_cfg_on_link_up(hw); 1576 } else { 1577 link_check = TRUE; 1578 } 1579 break; 1580 1581 case e1000_media_type_fiber: 1582 e1000_check_for_link(hw); 1583 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 1584 break; 1585 1586 case e1000_media_type_internal_serdes: 1587 e1000_check_for_link(hw); 1588 link_check = sc->hw.mac.serdes_has_link; 1589 break; 1590 1591 case e1000_media_type_unknown: 1592 default: 1593 break; 1594 } 1595 1596 /* Now check for a transition */ 1597 if (link_check && sc->link_active == 0) { 1598 e1000_get_speed_and_duplex(hw, &sc->link_speed, 1599 &sc->link_duplex); 1600 1601 /* 1602 * Check if we should enable/disable SPEED_MODE bit on 1603 * 82571EB/82572EI 1604 */ 1605 if (hw->mac.type == e1000_82571 || 1606 hw->mac.type == e1000_82572) { 1607 int tarc0; 1608 
1609 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 1610 if (sc->link_speed != SPEED_1000) 1611 tarc0 &= ~EMX_TARC_SPEED_MODE; 1612 else 1613 tarc0 |= EMX_TARC_SPEED_MODE; 1614 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 1615 } 1616 if (bootverbose) { 1617 device_printf(dev, "Link is up %d Mbps %s\n", 1618 sc->link_speed, 1619 ((sc->link_duplex == FULL_DUPLEX) ? 1620 "Full Duplex" : "Half Duplex")); 1621 } 1622 sc->link_active = 1; 1623 sc->smartspeed = 0; 1624 ifp->if_baudrate = sc->link_speed * 1000000; 1625 ifp->if_link_state = LINK_STATE_UP; 1626 if_link_state_change(ifp); 1627 } else if (!link_check && sc->link_active == 1) { 1628 ifp->if_baudrate = sc->link_speed = 0; 1629 sc->link_duplex = 0; 1630 if (bootverbose) 1631 device_printf(dev, "Link is Down\n"); 1632 sc->link_active = 0; 1633 #if 0 1634 /* Link down, disable watchdog */ 1635 if->if_timer = 0; 1636 #endif 1637 ifp->if_link_state = LINK_STATE_DOWN; 1638 if_link_state_change(ifp); 1639 } 1640 } 1641 1642 static void 1643 emx_stop(struct emx_softc *sc) 1644 { 1645 struct ifnet *ifp = &sc->arpcom.ac_if; 1646 int i; 1647 1648 ASSERT_SERIALIZED(ifp->if_serializer); 1649 1650 emx_disable_intr(sc); 1651 1652 callout_stop(&sc->timer); 1653 1654 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1655 ifp->if_timer = 0; 1656 1657 /* 1658 * Disable multiple receive queues. 1659 * 1660 * NOTE: 1661 * We should disable multiple receive queues before 1662 * resetting the hardware. 
	 */
	E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	for (i = 0; i < sc->num_tx_desc; i++) {
		struct emx_txbuf *tx_buffer = &sc->tx_buf[i];

		if (tx_buffer->m_head != NULL) {
			bus_dmamap_unload(sc->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
	}

	for (i = 0; i < sc->rx_ring_inuse; ++i)
		emx_free_rx_ring(sc, &sc->rx_data[i]);

	sc->csum_flags = 0;
	sc->csum_ehlen = 0;
	sc->csum_iphlen = 0;

	sc->tx_dd_head = 0;
	sc->tx_dd_tail = 0;
	sc->tx_nsegs = 0;
}

/*
 * Reset the hardware, configure flow-control water marks and run
 * the shared-code e1000_init_hw().  Returns 0 on success, EIO on
 * hardware initialization failure.
 */
static int
emx_hw_init(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	uint16_t rx_buffer_size;

	/* Issue a global reset */
	e1000_reset_hw(&sc->hw);

	/* Get control from any management/hw control */
	if (sc->hw.mac.type == e1000_82573 &&
	    e1000_check_mng_mode(&sc->hw))
		emx_get_hw_control(sc);

	/* Set up smart power down as default off on newer adapters. */
	if (!emx_smart_pwr_down &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;

	sc->hw.fc.high_water = rx_buffer_size -
	    roundup2(sc->max_frame_size, 1024);
	sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;

	if (sc->hw.mac.type == e1000_80003es2lan)
		sc->hw.fc.pause_time = 0xFFFF;
	else
		sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.requested_mode = e1000_fc_full;

	if (e1000_init_hw(&sc->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	e1000_check_for_link(&sc->hw);

	return (0);
}

/*
 * Set up the ifnet: name, callbacks, capabilities and the list of
 * supported media.  Called once at attach time.
 */
static void
emx_setup_ifp(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(sc->dev),
	    device_get_unit(sc->dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = emx_init;
	ifp->if_ioctl = emx_ioctl;
	ifp->if_start = emx_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = emx_poll;
#endif
	ifp->if_watchdog = emx_watchdog;
	ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = EMX_CSUM_FEATURES;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Specify the media types supported by this sc and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK,
	    emx_media_change, emx_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

/*
 * Workaround for SmartSpeed on 82541 and 82547 controllers
 */
static void
emx_smartspeed(struct emx_softc *sc)
{
	uint16_t phy_tmp;

	if (sc->link_active || sc->hw.phy.type != e1000_phy_igp ||
	    sc->hw.mac.autoneg == 0 ||
	    (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (sc->smartspeed == 0) {
		/*
		 * If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&sc->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if (phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&sc->hw,
				    PHY_1000T_CTRL, phy_tmp);
				sc->smartspeed++;
				if (sc->hw.mac.autoneg &&
				    !e1000_phy_setup_autoneg(&sc->hw) &&
				    !e1000_read_phy_reg(&sc->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= MII_CR_AUTO_NEG_EN |
					    MII_CR_RESTART_AUTO_NEG;
					e1000_write_phy_reg(&sc->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
		if (sc->hw.mac.autoneg &&
		    !e1000_phy_setup_autoneg(&sc->hw) &&
		    !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
			e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp);
		}
	}

	/* Restart process after EMX_SMARTSPEED_MAX iterations */
	if (sc->smartspeed++ == EMX_SMARTSPEED_MAX)
		sc->smartspeed = 0;
}

/*
 * Allocate the TX descriptor ring, the software buffer array and
 * per-buffer DMA maps.  Returns 0 on success or an errno; on error
 * everything allocated so far is released.
 */
static int
emx_create_tx_ring(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	struct emx_txbuf *tx_buffer;
	int error, i, tsize;

	/*
	 * Validate number of transmit descriptors. It must not exceed
	 * hardware maximum, and must be multiple of E1000_DBA_ALIGN.
	 */
	if ((emx_txd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 ||
	    emx_txd > EMX_MAX_TXD || emx_txd < EMX_MIN_TXD) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EMX_DEFAULT_TXD, emx_txd);
		sc->num_tx_desc = EMX_DEFAULT_TXD;
	} else {
		sc->num_tx_desc = emx_txd;
	}

	/*
	 * Allocate Transmit Descriptor ring
	 */
	tsize = roundup2(sc->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EMX_DBA_ALIGN);
	sc->tx_desc_base = bus_dmamem_coherent_any(sc->parent_dtag,
	    EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &sc->tx_desc_dtag, &sc->tx_desc_dmap,
	    &sc->tx_desc_paddr);
	if (sc->tx_desc_base == NULL) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		return ENOMEM;
	}

	sc->tx_buf = kmalloc(sizeof(struct emx_txbuf) * sc->num_tx_desc,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tags for tx buffers
	 */
	error = bus_dma_tag_create(sc->parent_dtag, /* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    EMX_TSO_SIZE,		/* maxsize */
	    EMX_MAX_SCATTER,		/* nsegments */
	    EMX_MAX_SEGSIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,		/* flags */
	    &sc->txtag);
	if (error) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		kfree(sc->tx_buf, M_DEVBUF);
		sc->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for tx buffers
	 */
	for (i = 0; i < sc->num_tx_desc; i++) {
		tx_buffer = &sc->tx_buf[i];

		error = bus_dmamap_create(sc->txtag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &tx_buffer->map);
		if (error) {
			device_printf(dev, "Unable to create TX DMA map\n");
			emx_destroy_tx_ring(sc, i);
			return error;
		}
	}
	return (0);
}

static void
emx_init_tx_ring(struct emx_softc *sc)
{
	/* Clear the old ring contents */
	bzero(sc->tx_desc_base,
	    sizeof(struct e1000_tx_desc) * sc->num_tx_desc);

	/* Reset state */
	sc->next_avail_tx_desc = 0;
	sc->next_tx_to_clean = 0;
	sc->num_tx_desc_avail = sc->num_tx_desc;
}

/*
 * Program the hardware TX unit: ring base/length, head/tail
 * pointers, inter-packet gap, TARC tweaks, and finally TCTL
 * (which enables transmission).
 */
static void
emx_init_tx_unit(struct emx_softc *sc)
{
	uint32_t tctl, tarc, tipg = 0;
	uint64_t bus_addr;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = sc->tx_desc_paddr;
	E1000_WRITE_REG(&sc->hw, E1000_TDLEN(0),
	    sc->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&sc->hw, E1000_TDBAH(0),
	    (uint32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&sc->hw, E1000_TDBAL(0),
	    (uint32_t)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&sc->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&sc->hw, E1000_TDH(0), 0);

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (sc->hw.mac.type) {
	case e1000_80003es2lan:
		tipg = DEFAULT_82543_TIPG_IPGR1;
		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
		    E1000_TIPG_IPGR2_SHIFT;
		break;

	default:
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	}

	E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg);

	/* NOTE: 0 is not allowed for TIDV */
	E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1);
	E1000_WRITE_REG(&sc->hw, E1000_TADV, 0);

	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572) {
		tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
		tarc |= EMX_TARC_SPEED_MODE;
		E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
	} else if (sc->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&sc->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl);
}

/*
 * Free the TX descriptor ring and the first `ndesc' DMA maps.
 * Used both on detach and as the error-unwind path of
 * emx_create_tx_ring(), which passes a partial count.
 */
static void
emx_destroy_tx_ring(struct emx_softc *sc, int ndesc)
{
	struct emx_txbuf *tx_buffer;
	int i;

	/* Free Transmit Descriptor ring */
	if (sc->tx_desc_base) {
		bus_dmamap_unload(sc->tx_desc_dtag, sc->tx_desc_dmap);
		bus_dmamem_free(sc->tx_desc_dtag, sc->tx_desc_base,
		    sc->tx_desc_dmap);
		bus_dma_tag_destroy(sc->tx_desc_dtag);

		sc->tx_desc_base = NULL;
	}

	if (sc->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; i++) {
		tx_buffer = &sc->tx_buf[i];

		KKASSERT(tx_buffer->m_head == NULL);
		bus_dmamap_destroy(sc->txtag, tx_buffer->map);
	}
	bus_dma_tag_destroy(sc->txtag);

	kfree(sc->tx_buf, M_DEVBUF);
	sc->tx_buf = NULL;
}

/*
 * The offload context needs to be set when we transfer the first
 * packet of a particular protocol (TCP/UDP). This routine has been
 * enhanced to deal with inserted VLAN headers.
 *
 * If the new packet's ether header length, ip header length and
 * csum offloading type are same as the previous packet, we should
 * avoid allocating a new csum context descriptor; mainly to take
 * advantage of the pipeline effect of the TX data read request.
 *
 * This function returns number of TX descriptors allocated for
 * csum context.
 */
static int
emx_txcsum(struct emx_softc *sc, struct mbuf *mp,
	   uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	struct emx_txbuf *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	int curr_txd, ehdrlen, csum_flags;
	uint32_t cmd, hdr_len, ip_hlen;
	uint16_t etype;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= ETHER_HDR_LEN,
		("emx_txcsum_pullup is not called (eh)?\n"));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN,
			("emx_txcsum_pullup is not called (evh)?\n"));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/*
	 * We only support TCP/UDP for IPv4 for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	if (etype != ETHERTYPE_IP)
		return 0;

	KASSERT(mp->m_len >= ehdrlen + EMX_IPVHL_SIZE,
		("emx_txcsum_pullup is not called (eh+ip_vhl)?\n"));

	/* NOTE: We could only safely access ip.ip_vhl part */
	ip = (struct ip *)(mp->m_data + ehdrlen);
	ip_hlen = ip->ip_hl << 2;

	csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;

	if (sc->csum_ehlen == ehdrlen && sc->csum_iphlen == ip_hlen &&
	    sc->csum_flags == csum_flags) {
		/*
		 * Same csum offload context as the previous packets;
		 * just return.
		 */
		*txd_upper = sc->csum_txd_upper;
		*txd_lower = sc->csum_txd_lower;
		return 0;
	}

	/*
	 * Setup a new csum offload context.
	 */

	curr_txd = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buf[curr_txd];
	TXD = (struct e1000_context_desc *)&sc->tx_desc_base[curr_txd];

	cmd = 0;

	/* Setup of IP header checksum. */
	if (csum_flags & CSUM_IP) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
		cmd |= E1000_TXD_CMD_IP;
		*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}
	hdr_len = ehdrlen + ip_hlen;

	if (csum_flags & CSUM_TCP) {
		/*
		 * Start offset for payload checksum calculation.
		 * End offset for payload checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->upper_setup.tcp_fields.tucss = hdr_len;
		TXD->upper_setup.tcp_fields.tucse = htole16(0);
		TXD->upper_setup.tcp_fields.tucso =
		    hdr_len + offsetof(struct tcphdr, th_sum);
		cmd |= E1000_TXD_CMD_TCP;
		*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	} else if (csum_flags & CSUM_UDP) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->upper_setup.tcp_fields.tucss = hdr_len;
		TXD->upper_setup.tcp_fields.tucse = htole16(0);
		TXD->upper_setup.tcp_fields.tucso =
		    hdr_len + offsetof(struct udphdr, uh_sum);
		*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
		     E1000_TXD_DTYP_D;		/* Data descr */

	/* Save the information for this csum offloading context */
	sc->csum_ehlen = ehdrlen;
	sc->csum_iphlen = ip_hlen;
	sc->csum_flags = csum_flags;
	sc->csum_txd_upper = *txd_upper;
	sc->csum_txd_lower = *txd_lower;

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length =
	    htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);

	if (++curr_txd == sc->num_tx_desc)
		curr_txd = 0;

	KKASSERT(sc->num_tx_desc_avail > 0);
	sc->num_tx_desc_avail--;

	sc->next_avail_tx_desc = curr_txd;
	return 1;
}

/*
 * Make sure the ethernet header and ip.ip_vhl are contiguous in
 * the first mbuf so emx_txcsum() can read them safely; pull the
 * data up when the mbuf is writable, otherwise drop the frame.
 */
static int
emx_txcsum_pullup(struct emx_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct ether_header *eh;
	int len;

	sc->tx_csum_try_pullup++;

	len = ETHER_HDR_LEN + EMX_IPVHL_SIZE;

	if (__predict_false(!M_WRITABLE(m))) {
		if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
			sc->tx_csum_drop1++;
			m_freem(m);
			*m0 = NULL;
			return ENOBUFS;
		}
		eh = mtod(m, struct ether_header *);

		if (eh->ether_type == htons(ETHERTYPE_VLAN))
			len += EVL_ENCAPLEN;

		if
(m->m_len < len) { 2237 sc->tx_csum_drop2++; 2238 m_freem(m); 2239 *m0 = NULL; 2240 return ENOBUFS; 2241 } 2242 return 0; 2243 } 2244 2245 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2246 sc->tx_csum_pullup1++; 2247 m = m_pullup(m, ETHER_HDR_LEN); 2248 if (m == NULL) { 2249 sc->tx_csum_pullup1_failed++; 2250 *m0 = NULL; 2251 return ENOBUFS; 2252 } 2253 *m0 = m; 2254 } 2255 eh = mtod(m, struct ether_header *); 2256 2257 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2258 len += EVL_ENCAPLEN; 2259 2260 if (m->m_len < len) { 2261 sc->tx_csum_pullup2++; 2262 m = m_pullup(m, len); 2263 if (m == NULL) { 2264 sc->tx_csum_pullup2_failed++; 2265 *m0 = NULL; 2266 return ENOBUFS; 2267 } 2268 *m0 = m; 2269 } 2270 return 0; 2271 } 2272 2273 static void 2274 emx_txeof(struct emx_softc *sc) 2275 { 2276 struct ifnet *ifp = &sc->arpcom.ac_if; 2277 struct emx_txbuf *tx_buffer; 2278 int first, num_avail; 2279 2280 if (sc->tx_dd_head == sc->tx_dd_tail) 2281 return; 2282 2283 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2284 return; 2285 2286 num_avail = sc->num_tx_desc_avail; 2287 first = sc->next_tx_to_clean; 2288 2289 while (sc->tx_dd_head != sc->tx_dd_tail) { 2290 int dd_idx = sc->tx_dd[sc->tx_dd_head]; 2291 struct e1000_tx_desc *tx_desc; 2292 2293 tx_desc = &sc->tx_desc_base[dd_idx]; 2294 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2295 EMX_INC_TXDD_IDX(sc->tx_dd_head); 2296 2297 if (++dd_idx == sc->num_tx_desc) 2298 dd_idx = 0; 2299 2300 while (first != dd_idx) { 2301 logif(pkt_txclean); 2302 2303 num_avail++; 2304 2305 tx_buffer = &sc->tx_buf[first]; 2306 if (tx_buffer->m_head) { 2307 ifp->if_opackets++; 2308 bus_dmamap_unload(sc->txtag, 2309 tx_buffer->map); 2310 m_freem(tx_buffer->m_head); 2311 tx_buffer->m_head = NULL; 2312 } 2313 2314 if (++first == sc->num_tx_desc) 2315 first = 0; 2316 } 2317 } else { 2318 break; 2319 } 2320 } 2321 sc->next_tx_to_clean = first; 2322 sc->num_tx_desc_avail = num_avail; 2323 2324 if (sc->tx_dd_head == sc->tx_dd_tail) { 2325 
sc->tx_dd_head = 0; 2326 sc->tx_dd_tail = 0; 2327 } 2328 2329 if (!EMX_IS_OACTIVE(sc)) { 2330 ifp->if_flags &= ~IFF_OACTIVE; 2331 2332 /* All clean, turn off the timer */ 2333 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2334 ifp->if_timer = 0; 2335 } 2336 } 2337 2338 static void 2339 emx_tx_collect(struct emx_softc *sc) 2340 { 2341 struct ifnet *ifp = &sc->arpcom.ac_if; 2342 struct emx_txbuf *tx_buffer; 2343 int tdh, first, num_avail, dd_idx = -1; 2344 2345 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2346 return; 2347 2348 tdh = E1000_READ_REG(&sc->hw, E1000_TDH(0)); 2349 if (tdh == sc->next_tx_to_clean) 2350 return; 2351 2352 if (sc->tx_dd_head != sc->tx_dd_tail) 2353 dd_idx = sc->tx_dd[sc->tx_dd_head]; 2354 2355 num_avail = sc->num_tx_desc_avail; 2356 first = sc->next_tx_to_clean; 2357 2358 while (first != tdh) { 2359 logif(pkt_txclean); 2360 2361 num_avail++; 2362 2363 tx_buffer = &sc->tx_buf[first]; 2364 if (tx_buffer->m_head) { 2365 ifp->if_opackets++; 2366 bus_dmamap_unload(sc->txtag, 2367 tx_buffer->map); 2368 m_freem(tx_buffer->m_head); 2369 tx_buffer->m_head = NULL; 2370 } 2371 2372 if (first == dd_idx) { 2373 EMX_INC_TXDD_IDX(sc->tx_dd_head); 2374 if (sc->tx_dd_head == sc->tx_dd_tail) { 2375 sc->tx_dd_head = 0; 2376 sc->tx_dd_tail = 0; 2377 dd_idx = -1; 2378 } else { 2379 dd_idx = sc->tx_dd[sc->tx_dd_head]; 2380 } 2381 } 2382 2383 if (++first == sc->num_tx_desc) 2384 first = 0; 2385 } 2386 sc->next_tx_to_clean = first; 2387 sc->num_tx_desc_avail = num_avail; 2388 2389 if (!EMX_IS_OACTIVE(sc)) { 2390 ifp->if_flags &= ~IFF_OACTIVE; 2391 2392 /* All clean, turn off the timer */ 2393 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2394 ifp->if_timer = 0; 2395 } 2396 } 2397 2398 /* 2399 * When Link is lost sometimes there is work still in the TX ring 2400 * which will result in a watchdog, rather than allow that do an 2401 * attempted cleanup and then reinit here. Note that this has been 2402 * seens mostly with fiber adapters. 
 */
static void
emx_tx_purge(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (!sc->link_active && ifp->if_timer) {
		emx_tx_collect(sc);
		if (ifp->if_timer) {
			/* Collection did not clear everything; reinit */
			if_printf(ifp, "Link lost, TX pending, reinit\n");
			ifp->if_timer = 0;
			emx_init(sc);
		}
	}
}

/*
 * Allocate a fresh mbuf cluster for RX slot 'i', DMA-load it, and
 * install it in the descriptor ring via emx_setup_rxdesc().
 *
 * 'init' selects blocking allocation (MB_WAIT) and enables error
 * printing; interrupt-time refills pass init == 0.  Returns 0 on
 * success or ENOBUFS/bus_dma error; on failure the ring slot is left
 * untouched.
 */
static int
emx_newbuf(struct emx_softc *sc, struct emx_rxdata *rdata, int i, int init)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct emx_rxbuf *rx_buffer;
	int error, nseg;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		rdata->mbuf_cluster_failed++;
		if (init) {
			if_printf(&sc->arpcom.ac_if,
			    "Unable to allocate RX mbuf\n");
		}
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Align the IP payload when the frame still fits the cluster */
	if (sc->max_frame_size <= MCLBYTES - ETHER_ALIGN)
		m_adj(m, ETHER_ALIGN);

	/* Load into the spare map first so failure can't corrupt the slot */
	error = bus_dmamap_load_mbuf_segment(rdata->rxtag,
	    rdata->rx_sparemap, m,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&sc->arpcom.ac_if,
			    "Unable to load RX mbuf\n");
		}
		return (error);
	}

	rx_buffer = &rdata->rx_buf[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(rdata->rxtag, rx_buffer->map);

	/* Swap the freshly-loaded spare map into the slot */
	map = rx_buffer->map;
	rx_buffer->map = rdata->rx_sparemap;
	rdata->rx_sparemap = map;

	rx_buffer->m_head = m;
	rx_buffer->paddr = seg.ds_addr;

	emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer);
	return (0);
}

/*
 * Allocate the RX descriptor ring, the per-slot software state array
 * and the DMA tag/maps for one RX ring.  Partially created state is
 * torn down on failure (via emx_destroy_rx_ring() for per-slot maps).
 */
static int
emx_create_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata)
{
	device_t dev = sc->dev;
	struct emx_rxbuf *rx_buffer;
	int i, error, rsize;

	/*
	 * Validate number of receive descriptors.  It must not exceed
	 * hardware maximum, and must be multiple of E1000_DBA_ALIGN.
	 */
	if ((emx_rxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 ||
	    emx_rxd > EMX_MAX_RXD || emx_rxd < EMX_MIN_RXD) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EMX_DEFAULT_RXD, emx_rxd);
		rdata->num_rx_desc = EMX_DEFAULT_RXD;
	} else {
		rdata->num_rx_desc = emx_rxd;
	}

	/*
	 * Allocate Receive Descriptor ring
	 */
	rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t),
	    EMX_DBA_ALIGN);
	rdata->rx_desc = bus_dmamem_coherent_any(sc->parent_dtag,
	    EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rdata->rx_desc_dtag, &rdata->rx_desc_dmap,
	    &rdata->rx_desc_paddr);
	if (rdata->rx_desc == NULL) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		return ENOMEM;
	}

	rdata->rx_buf = kmalloc(sizeof(struct emx_rxbuf) * rdata->num_rx_desc,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for rx buffers
	 */
	error = bus_dma_tag_create(sc->parent_dtag, /* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rdata->rxtag);
	if (error) {
		device_printf(dev, "Unable to allocate RX DMA tag\n");
		kfree(rdata->rx_buf, M_DEVBUF);
		rdata->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for rx buffers
	 */
	error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
	    &rdata->rx_sparemap);
	if (error) {
		device_printf(dev, "Unable to create spare RX DMA map\n");
		bus_dma_tag_destroy(rdata->rxtag);
		kfree(rdata->rx_buf, M_DEVBUF);
		rdata->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for rx buffers
	 */
	for (i = 0; i < rdata->num_rx_desc; i++) {
		rx_buffer = &rdata->rx_buf[i];

		error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "Unable to create RX DMA map\n");
			/* Destroy only the 'i' maps created so far */
			emx_destroy_rx_ring(sc, rdata, i);
			return error;
		}
	}
	return (0);
}

/*
 * Release all mbufs currently held by one RX ring, including any
 * partially assembled frame (fmp/lmp chain).  Descriptors, maps and
 * tags stay allocated; see emx_destroy_rx_ring() for full teardown.
 */
static void
emx_free_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata)
{
	int i;

	for (i = 0; i < rdata->num_rx_desc; i++) {
		struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i];

		if (rx_buffer->m_head != NULL) {
			bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
			m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}

	if (rdata->fmp != NULL)
		m_freem(rdata->fmp);
	rdata->fmp = NULL;
	rdata->lmp = NULL;
}

/*
 * (Re)populate one RX ring: zero the descriptors, allocate an mbuf
 * for every slot and reset the software scan index.  Returns the
 * first emx_newbuf() error, if any.
 */
static int
emx_init_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata)
{
	int i, error;

	/* Reset descriptor ring */
	bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);

	/* Allocate new ones. */
	for (i = 0; i < rdata->num_rx_desc; i++) {
		error = emx_newbuf(sc, rdata, i, 1);
		if (error)
			return (error);
	}

	/* Setup our descriptor pointers */
	rdata->next_rx_desc_to_check = 0;

	return (0);
}

/*
 * Program the hardware RX side: interrupt throttling, extended RX
 * descriptor format, per-ring base/length registers, RCTL, RX
 * checksum offload and (optionally) RSS, then enable receives.
 */
static void
emx_init_rx_unit(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint64_t bus_addr;
	uint32_t rctl, rxcsum, rfctl;
	int i;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/*
	 * Set the interrupt throttling rate.
	 * Value is calculated
	 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
	 */
	if (sc->int_throttle_ceil) {
		E1000_WRITE_REG(&sc->hw, E1000_ITR,
		    1000000000 / 256 / sc->int_throttle_ceil);
	} else {
		E1000_WRITE_REG(&sc->hw, E1000_ITR, 0);
	}

	/* Use extended RX descriptor */
	rfctl = E1000_RFCTL_EXTEN;

	/* Disable accelerated acknowledge */
	if (sc->hw.mac.type == e1000_82574)
		rfctl |= E1000_RFCTL_ACK_DIS;

	E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct emx_rxdata *rdata = &sc->rx_data[i];

		bus_addr = rdata->rx_desc_paddr;
		E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
		    rdata->num_rx_desc * sizeof(emx_rxdesc_t));
		E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
	}

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
	    (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* Don't store bad packet */
	rctl &= ~E1000_RCTL_SBP;

	/* MCLBYTES */
	rctl |= E1000_RCTL_SZ_2048;

	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/*
	 * Receive Checksum Offload for TCP and UDP
	 *
	 * Checksum offloading is also enabled if multiple receive
	 * queue is to be supported, since we need it to figure out
	 * packet type.
	 */
	if (ifp->if_capenable & (IFCAP_RSS | IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);

		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD;
		E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	 * Configure multiple receive queue (RSS)
	 */
	if (ifp->if_capenable & IFCAP_RSS) {
		uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
		uint32_t reta;

		KASSERT(sc->rx_ring_inuse == EMX_NRX_RING,
		    ("invalid number of RX ring (%d)",
		     sc->rx_ring_inuse));

		/*
		 * NOTE:
		 * When we reach here, RSS has already been disabled
		 * in emx_stop(), so we could safely configure RSS key
		 * and redirect table.
		 */

		/*
		 * Configure RSS key
		 */
		toeplitz_get_key(key, sizeof(key));
		for (i = 0; i < EMX_NRSSRK; ++i) {
			uint32_t rssrk;

			rssrk = EMX_RSSRK_VAL(key, i);
			EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);

			E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
		}

		/*
		 * Configure RSS redirect table in following fashion:
		 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
		 */
		reta = 0;
		for (i = 0; i < EMX_RETA_SIZE; ++i) {
			uint32_t q;

			q = (i % sc->rx_ring_inuse) << EMX_RETA_RINGIDX_SHIFT;
			reta |= q << (8 * i);
		}
		EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);

		/* Same 4-entry pattern replicated across the whole table */
		for (i = 0; i < EMX_NRETA; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta);

		/*
		 * Enable multiple receive queues.
		 * Enable IPv4 RSS standard hash functions.
		 * Disable RSS interrupt.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_MRQC,
		    E1000_MRQC_ENABLE_RSS_2Q |
		    E1000_MRQC_RSS_FIELD_IPV4_TCP |
		    E1000_MRQC_RSS_FIELD_IPV4);
	}

	/*
	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
	 * long latencies are observed, like Lenovo X60. This
	 * change eliminates the problem, but since having positive
	 * values in RDTR is a known source of problems on other
	 * platforms another solution is being sought.
	 */
	if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
		E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
		E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
	}

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers
	 */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
		    sc->rx_data[i].num_rx_desc - 1);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
}

/*
 * Tear down one RX ring: free the descriptor DMA memory, then destroy
 * the first 'ndesc' per-slot DMA maps, the spare map, the tag and the
 * software state array.  'ndesc' < num_rx_desc supports unwinding a
 * partially constructed ring from emx_create_rx_ring().
 * All mbufs must already have been freed (asserted below).
 */
static void
emx_destroy_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata, int ndesc)
{
	struct emx_rxbuf *rx_buffer;
	int i;

	/* Free Receive Descriptor ring */
	if (rdata->rx_desc) {
		bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
		bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
		    rdata->rx_desc_dmap);
		bus_dma_tag_destroy(rdata->rx_desc_dtag);

		rdata->rx_desc = NULL;
	}

	if (rdata->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc; i++) {
		rx_buffer = &rdata->rx_buf[i];

		KKASSERT(rx_buffer->m_head == NULL);
		bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
	}
	bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
	bus_dma_tag_destroy(rdata->rxtag);

	kfree(rdata->rx_buf, M_DEVBUF);
	rdata->rx_buf = NULL;
}

/*
 * Receive completion for one RX ring: consume up to 'count' complete
 * (EOP) frames, chaining multi-descriptor frames through fmp/lmp,
 * and hand finished frames to the ether_input chain.  Finally the
 * RDT tail register is advanced to return the slots to hardware.
 */
static void
emx_rxeof(struct emx_softc *sc, int ring_idx, int count)
{
	struct emx_rxdata *rdata = &sc->rx_data[ring_idx];
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t staterr;
	emx_rxdesc_t *current_desc;
	struct mbuf *mp;
	int i;
	struct mbuf_chain chain[MAXCPU];

	i = rdata->next_rx_desc_to_check;
	current_desc = &rdata->rx_desc[i];
	staterr = le32toh(current_desc->rxd_staterr);

	if (!(staterr & E1000_RXD_STAT_DD))
		return;

	ether_input_chain_init(chain);

	while
	    ((staterr & E1000_RXD_STAT_DD) && count != 0) {
		struct pktinfo *pi = NULL, pi0;
		struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
		struct mbuf *m = NULL;
		int eop, len;

		logif(pkt_receive);

		mp = rx_buf->m_head;

		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(rdata->rxtag, rx_buf->map,
		    BUS_DMASYNC_POSTREAD);

		len = le16toh(current_desc->rxd_length);
		if (staterr & E1000_RXD_STAT_EOP) {
			/* 'count' only limits complete frames */
			count--;
			eop = 1;
		} else {
			eop = 0;
		}

		if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
			uint16_t vlan = 0;
			uint32_t mrq, rss_hash;

			/*
			 * Save several necessary information,
			 * before emx_newbuf() destroy it.
			 */
			if ((staterr & E1000_RXD_STAT_VP) && eop)
				vlan = le16toh(current_desc->rxd_vlan);

			mrq = le32toh(current_desc->rxd_mrq);
			rss_hash = le32toh(current_desc->rxd_rss);

			EMX_RSS_DPRINTF(sc, 10,
			    "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
			    ring_idx, mrq, rss_hash);

			/* Refill failed: recycle this mbuf, drop the frame */
			if (emx_newbuf(sc, rdata, i, 0) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (rdata->fmp == NULL) {
				mp->m_pkthdr.len = len;
				rdata->fmp = mp; /* Store the first mbuf */
				rdata->lmp = mp;
			} else {
				/*
				 * Chain mbuf's together
				 */
				rdata->lmp->m_next = mp;
				rdata->lmp = rdata->lmp->m_next;
				rdata->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				rdata->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;

				if (ifp->if_capenable & IFCAP_RXCSUM)
					emx_rxcsum(staterr, rdata->fmp);

				if (staterr & E1000_RXD_STAT_VP) {
					rdata->fmp->m_pkthdr.ether_vlantag =
					    vlan;
					rdata->fmp->m_flags |= M_VLANTAG;
				}
				m = rdata->fmp;
				rdata->fmp = NULL;
				rdata->lmp = NULL;

				if (ifp->if_capenable & IFCAP_RSS) {
					pi = emx_rssinfo(m, &pi0, mrq,
					    rss_hash, staterr);
				}
#ifdef EMX_RSS_DEBUG
				rdata->rx_pkts++;
#endif
			}
		} else {
			ifp->if_ierrors++;
discard:
			/* Reuse the old mbuf for this slot, toss the frame */
			emx_setup_rxdesc(current_desc, rx_buf);
			if (rdata->fmp != NULL) {
				m_freem(rdata->fmp);
				rdata->fmp = NULL;
				rdata->lmp = NULL;
			}
			m = NULL;
		}

		if (m != NULL)
			ether_input_chain(ifp, m, pi, chain);

		/* Advance our pointers to the next descriptor. */
		if (++i == rdata->num_rx_desc)
			i = 0;

		current_desc = &rdata->rx_desc[i];
		staterr = le32toh(current_desc->rxd_staterr);
	}
	rdata->next_rx_desc_to_check = i;

	ether_input_dispatch(chain);

	/* Advance the E1000's Receive Queue "Tail Pointer". */
	if (--i < 0)
		i = rdata->num_rx_desc - 1;
	E1000_WRITE_REG(&sc->hw, E1000_RDT(ring_idx), i);
}

/*
 * Re-enable the interrupt handler path and unmask device interrupts.
 * The serializer handler is enabled before the hardware mask is
 * lifted; emx_disable_intr() does the mirror image in reverse order.
 */
static void
emx_enable_intr(struct emx_softc *sc)
{
	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

/*
 * Mask all device interrupts, then disable the interrupt handler path.
 */
static void
emx_disable_intr(struct emx_softc *sc)
{
	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features
 */
static void
emx_get_mgmt(struct emx_softc *sc)
{
	/* A shared code workaround */
	if (sc->has_manage) {
		int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
		manc2h |= E1000_MNG2HOST_PORT_623;
		manc2h |= E1000_MNG2HOST_PORT_664;
		E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);

		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to hardware management
 * controller if there is one.
 */
static void
emx_rel_mgmt(struct emx_softc *sc)
{
	if (sc->has_manage) {
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}

/*
 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.  For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 */
static void
emx_get_hw_control(struct emx_softc *sc)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	switch (sc->hw.mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
		E1000_WRITE_REG(&sc->hw, E1000_SWSM,
		    swsm | E1000_SWSM_DRV_LOAD);
		break;

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;

	default:
		break;
	}
}

/*
 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.  For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 */
static void
emx_rel_hw_control(struct emx_softc *sc)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware take over control of h/w */
	switch (sc->hw.mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
		E1000_WRITE_REG(&sc->hw, E1000_SWSM,
		    swsm & ~E1000_SWSM_DRV_LOAD);
		break;

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;

	default:
		break;
	}
}

/*
 * Return TRUE when 'addr' is a usable unicast station address:
 * not multicast/broadcast (low bit of first octet clear) and not
 * all-zero.
 */
static int
emx_is_valid_eaddr(const uint8_t *addr)
{
	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return (FALSE);

	return (TRUE);
}

/*
 * Enable PCI Wake On Lan capability
 */
void
emx_enable_wol(device_t dev)
{
	uint16_t cap, status;
	uint8_t id;

	/* First find the capabilities pointer*/
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);

	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)	/* Something wrong */
		return;

	/*
	 * OK, we have the power capabilities,
	 * so now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
}

/*
 * Accumulate the hardware statistics registers into sc->stats and
 * derive the ifnet collision/error counters from them.  The hardware
 * counters are clear-on-read, hence the '+=' accumulation.
 */
static void
emx_update_stats(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Symbol/sequence errors are only meaningful with link up */
	if (sc->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
		sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
	}
	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
	sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
	sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
	sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);

	sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
	sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
	sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
	sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
	sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
	sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
	sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
	sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
	sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
	sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
	sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
	sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
	sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
	sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
	sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */
	/*
	 * NOTE(review): only the high dwords (GORCH/GOTCH and TORH/TOTH
	 * below) are actually read here, despite the comment above --
	 * confirm whether dropping the low-dword reads was intentional.
	 */

	sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH);
	sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH);

	sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
	sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
	sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
	sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
	sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);

	sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
	sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);

	sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
	sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
	sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
	sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);

	sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
	sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC);
	sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS);
	sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR);
	sc->stats.tsctc += E1000_READ_REG(&sc->hw,
	    E1000_TSCTC);
	sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC);

	ifp->if_collisions = sc->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = sc->dropped_pkts + sc->stats.rxerrc +
	    sc->stats.crcerrs + sc->stats.algnerrc +
	    sc->stats.ruc + sc->stats.roc +
	    sc->stats.mpc + sc->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol +
	    sc->watchdog_events;
}

/*
 * Dump driver/hardware debug state (registers, ring pointers and the
 * software counters) to the console.  Triggered via the "debug"
 * sysctl; see emx_sysctl_debug_info().
 */
static void
emx_print_debug_info(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	uint8_t *hw_addr = sc->hw.hw_addr;

	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
	    E1000_READ_REG(&sc->hw, E1000_CTRL),
	    E1000_READ_REG(&sc->hw, E1000_RCTL));
	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
	    ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),
	    (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) );
	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
	    sc->hw.fc.high_water, sc->hw.fc.low_water);
	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
	    E1000_READ_REG(&sc->hw, E1000_TIDV),
	    E1000_READ_REG(&sc->hw, E1000_TADV));
	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
	    E1000_READ_REG(&sc->hw, E1000_RDTR),
	    E1000_READ_REG(&sc->hw, E1000_RADV));
	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
	    E1000_READ_REG(&sc->hw, E1000_TDH(0)),
	    E1000_READ_REG(&sc->hw, E1000_TDT(0)));
	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
	    E1000_READ_REG(&sc->hw, E1000_RDH(0)),
	    E1000_READ_REG(&sc->hw, E1000_RDT(0)));
	device_printf(dev, "Num Tx descriptors avail = %d\n",
	    sc->num_tx_desc_avail);
	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
	    sc->no_tx_desc_avail1);
	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
	    sc->no_tx_desc_avail2);
	device_printf(dev, "Std mbuf failed = %ld\n",
	    sc->mbuf_alloc_failed);
	device_printf(dev, "Std mbuf cluster failed = %ld\n",
	    sc->rx_data[0].mbuf_cluster_failed);
	device_printf(dev, "Driver dropped packets = %ld\n",
	    sc->dropped_pkts);
	device_printf(dev, "Driver tx dma failure in encap = %ld\n",
	    sc->no_tx_dma_setup);

	device_printf(dev, "TXCSUM try pullup = %lu\n",
	    sc->tx_csum_try_pullup);
	device_printf(dev, "TXCSUM m_pullup(eh) called = %lu\n",
	    sc->tx_csum_pullup1);
	device_printf(dev, "TXCSUM m_pullup(eh) failed = %lu\n",
	    sc->tx_csum_pullup1_failed);
	device_printf(dev, "TXCSUM m_pullup(eh+ip) called = %lu\n",
	    sc->tx_csum_pullup2);
	device_printf(dev, "TXCSUM m_pullup(eh+ip) failed = %lu\n",
	    sc->tx_csum_pullup2_failed);
	device_printf(dev, "TXCSUM non-writable(eh) droped = %lu\n",
	    sc->tx_csum_drop1);
	device_printf(dev, "TXCSUM non-writable(eh+ip) droped = %lu\n",
	    sc->tx_csum_drop2);
}

/*
 * Dump the accumulated hardware statistics (see emx_update_stats())
 * to the console.  Triggered via the "stats" sysctl.
 */
static void
emx_print_hw_stats(struct emx_softc *sc)
{
	device_t dev = sc->dev;

	device_printf(dev, "Excessive collisions = %lld\n",
	    (long long)sc->stats.ecol);
#if (DEBUG_HW > 0)	/* Don't output these errors normally */
	device_printf(dev, "Symbol errors = %lld\n",
	    (long long)sc->stats.symerrs);
#endif
	device_printf(dev, "Sequence errors = %lld\n",
	    (long long)sc->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
	    (long long)sc->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	    (long long)sc->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	    (long long)sc->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own. */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    ((long long)sc->stats.roc + (long long)sc->stats.ruc));
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)sc->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
	    (long long)sc->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)sc->stats.algnerrc);
	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
	    (long long)sc->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    sc->watchdog_events);
	device_printf(dev, "XON Rcvd = %lld\n",
	    (long long)sc->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	    (long long)sc->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	    (long long)sc->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	    (long long)sc->stats.xofftxc);
	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)sc->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)sc->stats.gptc);
}

/*
 * Hex-dump the first 32 16-bit words of the NVM/EEPROM to the console,
 * 8 words per row.  Triggered via the "debug" sysctl (result == 2).
 */
static void
emx_print_nvm_info(struct emx_softc *sc)
{
	uint16_t eeprom_data;
	int i, j, row = 0;

	/* It's a bit crude, but it gets the job done */
	kprintf("\nInterface EEPROM Dump:\n");
	kprintf("Offset\n0x0000 ");
	for (i = 0, j = 0; i < 32; i++, j++) {
		if (j == 8) { /* Make the offset block */
			j = 0; ++row;
			kprintf("\n0x00%x0 ",row);
		}
		e1000_read_nvm(&sc->hw, i, 1, &eeprom_data);
		kprintf("%04x ", eeprom_data);
	}
	kprintf("\n");
}

/*
 * Sysctl handler: write 1 to dump debug info, 2 to dump the EEPROM.
 * Runs under the interface serializer.
 */
static int
emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc;
	struct ifnet *ifp;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	sc = (struct emx_softc *)arg1;
	ifp
= &sc->arpcom.ac_if; 3324 3325 lwkt_serialize_enter(ifp->if_serializer); 3326 3327 if (result == 1) 3328 emx_print_debug_info(sc); 3329 3330 /* 3331 * This value will cause a hex dump of the 3332 * first 32 16-bit words of the EEPROM to 3333 * the screen. 3334 */ 3335 if (result == 2) 3336 emx_print_nvm_info(sc); 3337 3338 lwkt_serialize_exit(ifp->if_serializer); 3339 3340 return (error); 3341 } 3342 3343 static int 3344 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3345 { 3346 int error, result; 3347 3348 result = -1; 3349 error = sysctl_handle_int(oidp, &result, 0, req); 3350 if (error || !req->newptr) 3351 return (error); 3352 3353 if (result == 1) { 3354 struct emx_softc *sc = (struct emx_softc *)arg1; 3355 struct ifnet *ifp = &sc->arpcom.ac_if; 3356 3357 lwkt_serialize_enter(ifp->if_serializer); 3358 emx_print_hw_stats(sc); 3359 lwkt_serialize_exit(ifp->if_serializer); 3360 } 3361 return (error); 3362 } 3363 3364 static void 3365 emx_add_sysctl(struct emx_softc *sc) 3366 { 3367 #ifdef PROFILE_SERIALIZER 3368 struct ifnet *ifp = &sc->arpcom.ac_if; 3369 #endif 3370 #ifdef EMX_RSS_DEBUG 3371 char rx_pkt[32]; 3372 int i; 3373 #endif 3374 3375 sysctl_ctx_init(&sc->sysctl_ctx); 3376 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 3377 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 3378 device_get_nameunit(sc->dev), 3379 CTLFLAG_RD, 0, ""); 3380 if (sc->sysctl_tree == NULL) { 3381 device_printf(sc->dev, "can't add sysctl node\n"); 3382 return; 3383 } 3384 3385 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3386 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3387 emx_sysctl_debug_info, "I", "Debug Information"); 3388 3389 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3390 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3391 emx_sysctl_stats, "I", "Statistics"); 3392 3393 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3394 OID_AUTO, "rxd", CTLFLAG_RD, 3395 &sc->rx_data[0].num_rx_desc, 0, NULL); 3396 
SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3397 OID_AUTO, "txd", CTLFLAG_RD, &sc->num_tx_desc, 0, NULL); 3398 3399 #ifdef PROFILE_SERIALIZER 3400 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3401 OID_AUTO, "serializer_sleep", CTLFLAG_RW, 3402 &ifp->if_serializer->sleep_cnt, 0, NULL); 3403 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3404 OID_AUTO, "serializer_tryfail", CTLFLAG_RW, 3405 &ifp->if_serializer->tryfail_cnt, 0, NULL); 3406 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3407 OID_AUTO, "serializer_enter", CTLFLAG_RW, 3408 &ifp->if_serializer->enter_cnt, 0, NULL); 3409 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3410 OID_AUTO, "serializer_try", CTLFLAG_RW, 3411 &ifp->if_serializer->try_cnt, 0, NULL); 3412 #endif 3413 3414 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3415 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, 3416 sc, 0, emx_sysctl_int_throttle, "I", 3417 "interrupt throttling rate"); 3418 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3419 OID_AUTO, "int_tx_nsegs", CTLTYPE_INT|CTLFLAG_RW, 3420 sc, 0, emx_sysctl_int_tx_nsegs, "I", 3421 "# segments per TX interrupt"); 3422 3423 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3424 OID_AUTO, "rx_ring_inuse", CTLFLAG_RD, 3425 &sc->rx_ring_inuse, 0, "RX ring in use"); 3426 3427 #ifdef EMX_RSS_DEBUG 3428 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3429 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3430 0, "RSS debug level"); 3431 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3432 ksnprintf(rx_pkt, sizeof(rx_pkt), "rx%d_pkt", i); 3433 SYSCTL_ADD_UINT(&sc->sysctl_ctx, 3434 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, 3435 rx_pkt, CTLFLAG_RW, 3436 &sc->rx_data[i].rx_pkts, 0, "RXed packets"); 3437 } 3438 #endif 3439 } 3440 3441 static int 3442 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3443 { 3444 struct 
emx_softc *sc = (void *)arg1; 3445 struct ifnet *ifp = &sc->arpcom.ac_if; 3446 int error, throttle; 3447 3448 throttle = sc->int_throttle_ceil; 3449 error = sysctl_handle_int(oidp, &throttle, 0, req); 3450 if (error || req->newptr == NULL) 3451 return error; 3452 if (throttle < 0 || throttle > 1000000000 / 256) 3453 return EINVAL; 3454 3455 if (throttle) { 3456 /* 3457 * Set the interrupt throttling rate in 256ns increments, 3458 * recalculate sysctl value assignment to get exact frequency. 3459 */ 3460 throttle = 1000000000 / 256 / throttle; 3461 3462 /* Upper 16bits of ITR is reserved and should be zero */ 3463 if (throttle & 0xffff0000) 3464 return EINVAL; 3465 } 3466 3467 lwkt_serialize_enter(ifp->if_serializer); 3468 3469 if (throttle) 3470 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3471 else 3472 sc->int_throttle_ceil = 0; 3473 3474 if (ifp->if_flags & IFF_RUNNING) 3475 E1000_WRITE_REG(&sc->hw, E1000_ITR, throttle); 3476 3477 lwkt_serialize_exit(ifp->if_serializer); 3478 3479 if (bootverbose) { 3480 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3481 sc->int_throttle_ceil); 3482 } 3483 return 0; 3484 } 3485 3486 static int 3487 emx_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS) 3488 { 3489 struct emx_softc *sc = (void *)arg1; 3490 struct ifnet *ifp = &sc->arpcom.ac_if; 3491 int error, segs; 3492 3493 segs = sc->tx_int_nsegs; 3494 error = sysctl_handle_int(oidp, &segs, 0, req); 3495 if (error || req->newptr == NULL) 3496 return error; 3497 if (segs <= 0) 3498 return EINVAL; 3499 3500 lwkt_serialize_enter(ifp->if_serializer); 3501 3502 /* 3503 * Don't allow int_tx_nsegs to become: 3504 * o Less the oact_tx_desc 3505 * o Too large that no TX desc will cause TX interrupt to 3506 * be generated (OACTIVE will never recover) 3507 * o Too small that will cause tx_dd[] overflow 3508 */ 3509 if (segs < sc->oact_tx_desc || 3510 segs >= sc->num_tx_desc - sc->oact_tx_desc || 3511 segs < sc->num_tx_desc / EMX_TXDD_SAFE) { 3512 error = EINVAL; 3513 } else { 3514 
error = 0; 3515 sc->tx_int_nsegs = segs; 3516 } 3517 3518 lwkt_serialize_exit(ifp->if_serializer); 3519 3520 return error; 3521 } 3522 3523 static int 3524 emx_dma_alloc(struct emx_softc *sc) 3525 { 3526 int error, i; 3527 3528 /* 3529 * Create top level busdma tag 3530 */ 3531 error = bus_dma_tag_create(NULL, 1, 0, 3532 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3533 NULL, NULL, 3534 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 3535 0, &sc->parent_dtag); 3536 if (error) { 3537 device_printf(sc->dev, "could not create top level DMA tag\n"); 3538 return error; 3539 } 3540 3541 /* 3542 * Allocate transmit descriptors ring and buffers 3543 */ 3544 error = emx_create_tx_ring(sc); 3545 if (error) { 3546 device_printf(sc->dev, "Could not setup transmit structures\n"); 3547 return error; 3548 } 3549 3550 /* 3551 * Allocate receive descriptors ring and buffers 3552 */ 3553 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3554 error = emx_create_rx_ring(sc, &sc->rx_data[i]); 3555 if (error) { 3556 device_printf(sc->dev, 3557 "Could not setup receive structures\n"); 3558 return error; 3559 } 3560 } 3561 return 0; 3562 } 3563 3564 static void 3565 emx_dma_free(struct emx_softc *sc) 3566 { 3567 int i; 3568 3569 emx_destroy_tx_ring(sc, sc->num_tx_desc); 3570 3571 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3572 emx_destroy_rx_ring(sc, &sc->rx_data[i], 3573 sc->rx_data[i].num_rx_desc); 3574 } 3575 3576 /* Free top level busdma tag */ 3577 if (sc->parent_dtag != NULL) 3578 bus_dma_tag_destroy(sc->parent_dtag); 3579 } 3580