/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
 *
 * Copyright (c) 2001-2015, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * SERIALIZATION API RULES:
 *
 * - We must call lwkt_serialize_handler_enable() prior to enabling the
 *   hardware interrupt and lwkt_serialize_handler_disable() after disabling
 *   the hardware interrupt in order to avoid handler execution races from
 *   scheduled interrupt threads.
 */

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/em/if_em.h>

#define DEBUG_HW 0

#define EM_NAME	"Intel(R) PRO/1000 Network Connection "
#define EM_VER	" 7.6.2"

#define _EM_DEVICE(id, ret) \
	{ EM_VENDOR_ID, E1000_DEV_ID_##id, ret, EM_NAME #id EM_VER }
#define EM_EMX_DEVICE(id)	_EM_DEVICE(id, -100)
#define EM_DEVICE(id)		_EM_DEVICE(id, 0)
#define EM_DEVICE_NULL		{ 0, 0, 0, NULL }

static const struct em_vendor_info em_vendor_info_array[] = {
	EM_DEVICE(82540EM),
	EM_DEVICE(82540EM_LOM),
	EM_DEVICE(82540EP),
	EM_DEVICE(82540EP_LOM),
	EM_DEVICE(82540EP_LP),

	EM_DEVICE(82541EI),
	EM_DEVICE(82541ER),
	EM_DEVICE(82541ER_LOM),
	EM_DEVICE(82541EI_MOBILE),
	EM_DEVICE(82541GI),
	EM_DEVICE(82541GI_LF),
	EM_DEVICE(82541GI_MOBILE),

	EM_DEVICE(82542),

	EM_DEVICE(82543GC_FIBER),
	EM_DEVICE(82543GC_COPPER),

	EM_DEVICE(82544EI_COPPER),
	EM_DEVICE(82544EI_FIBER),
	EM_DEVICE(82544GC_COPPER),
	EM_DEVICE(82544GC_LOM),

	EM_DEVICE(82545EM_COPPER),
	EM_DEVICE(82545EM_FIBER),
	EM_DEVICE(82545GM_COPPER),
	EM_DEVICE(82545GM_FIBER),
	EM_DEVICE(82545GM_SERDES),

	EM_DEVICE(82546EB_COPPER),
	EM_DEVICE(82546EB_FIBER),
	EM_DEVICE(82546EB_QUAD_COPPER),
	EM_DEVICE(82546GB_COPPER),
	EM_DEVICE(82546GB_FIBER),
	EM_DEVICE(82546GB_SERDES),
	EM_DEVICE(82546GB_PCIE),
	EM_DEVICE(82546GB_QUAD_COPPER),
	EM_DEVICE(82546GB_QUAD_COPPER_KSP3),

	EM_DEVICE(82547EI),
	EM_DEVICE(82547EI_MOBILE),
	EM_DEVICE(82547GI),

	EM_EMX_DEVICE(82571EB_COPPER),
	EM_EMX_DEVICE(82571EB_FIBER),
	EM_EMX_DEVICE(82571EB_SERDES),
	EM_EMX_DEVICE(82571EB_SERDES_DUAL),
	EM_EMX_DEVICE(82571EB_SERDES_QUAD),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EM_EMX_DEVICE(82571EB_QUAD_FIBER),
	EM_EMX_DEVICE(82571PT_QUAD_COPPER),

	EM_EMX_DEVICE(82572EI_COPPER),
	EM_EMX_DEVICE(82572EI_FIBER),
	EM_EMX_DEVICE(82572EI_SERDES),
	EM_EMX_DEVICE(82572EI),

	EM_EMX_DEVICE(82573E),
	EM_EMX_DEVICE(82573E_IAMT),
	EM_EMX_DEVICE(82573L),

	EM_DEVICE(82583V),

	EM_EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EM_EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EM_EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EM_EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EM_DEVICE(ICH8_IGP_M_AMT),
	EM_DEVICE(ICH8_IGP_AMT),
	EM_DEVICE(ICH8_IGP_C),
	EM_DEVICE(ICH8_IFE),
	EM_DEVICE(ICH8_IFE_GT),
	EM_DEVICE(ICH8_IFE_G),
	EM_DEVICE(ICH8_IGP_M),
	EM_DEVICE(ICH8_82567V_3),

	EM_DEVICE(ICH9_IGP_M_AMT),
	EM_DEVICE(ICH9_IGP_AMT),
	EM_DEVICE(ICH9_IGP_C),
	EM_DEVICE(ICH9_IGP_M),
	EM_DEVICE(ICH9_IGP_M_V),
	EM_DEVICE(ICH9_IFE),
	EM_DEVICE(ICH9_IFE_GT),
	EM_DEVICE(ICH9_IFE_G),
	EM_DEVICE(ICH9_BM),

	EM_EMX_DEVICE(82574L),
	EM_EMX_DEVICE(82574LA),

	EM_DEVICE(ICH10_R_BM_LM),
	EM_DEVICE(ICH10_R_BM_LF),
	EM_DEVICE(ICH10_R_BM_V),
	EM_DEVICE(ICH10_D_BM_LM),
	EM_DEVICE(ICH10_D_BM_LF),
	EM_DEVICE(ICH10_D_BM_V),

	EM_DEVICE(PCH_M_HV_LM),
	EM_DEVICE(PCH_M_HV_LC),
	EM_DEVICE(PCH_D_HV_DM),
	EM_DEVICE(PCH_D_HV_DC),

	EM_DEVICE(PCH2_LV_LM),
	EM_DEVICE(PCH2_LV_V),

	EM_EMX_DEVICE(PCH_LPT_I217_LM),
	EM_EMX_DEVICE(PCH_LPT_I217_V),
	EM_EMX_DEVICE(PCH_LPTLP_I218_LM),
	EM_EMX_DEVICE(PCH_LPTLP_I218_V),
	EM_EMX_DEVICE(PCH_I218_LM2),
	EM_EMX_DEVICE(PCH_I218_V2),
	EM_EMX_DEVICE(PCH_I218_LM3),
	EM_EMX_DEVICE(PCH_I218_V3),
	EM_EMX_DEVICE(PCH_SPT_I219_LM),
	EM_EMX_DEVICE(PCH_SPT_I219_V),
	EM_EMX_DEVICE(PCH_SPT_I219_LM2),
	EM_EMX_DEVICE(PCH_SPT_I219_V2),
	EM_EMX_DEVICE(PCH_LBG_I219_LM3),
	EM_EMX_DEVICE(PCH_SPT_I219_LM4),
	EM_EMX_DEVICE(PCH_SPT_I219_V4),
	EM_EMX_DEVICE(PCH_SPT_I219_LM5),
	EM_EMX_DEVICE(PCH_SPT_I219_V5),

	/* required last entry */
	EM_DEVICE_NULL
};

static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static int	em_suspend(device_t);
static int	em_resume(device_t);

static void	em_init(void *);
static void	em_stop(struct adapter *);
static int	em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	em_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	em_npoll(struct ifnet *, struct ifpoll_info *);
static void	em_npoll_compat(struct ifnet *, void *, int);
#endif
static void	em_watchdog(struct ifnet *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
static void	em_timer(void *);

static void	em_intr(void *);
static void	em_intr_mask(void *);
static void	em_intr_body(struct adapter *, boolean_t);
static void	em_rxeof(struct adapter *, int);
static void	em_txeof(struct adapter *);
static void	em_tx_collect(struct adapter *, boolean_t);
static void	em_tx_purge(struct adapter *);
static void	em_txgc_timer(void *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);

static int	em_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_init_tx_ring(struct adapter *);
static int	em_init_rx_ring(struct adapter *);
static int	em_create_tx_ring(struct adapter *);
static int	em_create_rx_ring(struct adapter *);
static void	em_destroy_tx_ring(struct adapter *, int);
static void	em_destroy_rx_ring(struct adapter *, int);
static int	em_newbuf(struct adapter *, int, int);
static int	em_encap(struct adapter *, struct mbuf **, int *, int *);
static void	em_rxcsum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static int	em_txcsum(struct adapter *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	em_tso_pullup(struct adapter *, struct mbuf **);
static int	em_tso_setup(struct adapter *, struct mbuf *,
		    uint32_t *, uint32_t *);

static int	em_get_hw_info(struct adapter *);
static int	em_is_valid_eaddr(const uint8_t *);
static int	em_alloc_pci_res(struct adapter *);
static void	em_free_pci_res(struct adapter *);
static int	em_reset(struct adapter *);
static void	em_setup_ifp(struct adapter *);
static void	em_init_tx_unit(struct adapter *);
static void	em_init_rx_unit(struct adapter *);
static void	em_update_stats(struct adapter *);
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_update_link_status(struct adapter *);
static void	em_smartspeed(struct adapter *);
static void	em_set_itr(struct adapter *, uint32_t);
static void	em_disable_aspm(struct adapter *);
static void	em_flush_tx_ring(struct adapter *);
static void	em_flush_rx_ring(struct adapter *);
static void	em_flush_txrx_ring(struct adapter *);

/* Hardware workarounds */
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *);
static void	em_82547_move_tail_serialized(struct adapter *);
static uint32_t	em_82544_fill_desc(bus_addr_t, uint32_t, PDESC_ARRAY);

static void	em_print_debug_info(struct adapter *);
static void	em_print_nvm_info(struct adapter *);
static void	em_print_hw_stats(struct adapter *);

static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
static void	em_add_sysctl(struct adapter *adapter);

/* Management and WOL Support */
static void	em_get_mgmt(struct adapter *);
static void	em_rel_mgmt(struct adapter *);
static void	em_get_hw_control(struct adapter *);
static void	em_rel_hw_control(struct adapter *);
static void	em_enable_wol(device_t);

static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		em_probe),
	DEVMETHOD(device_attach,	em_attach),
	DEVMETHOD(device_detach,	em_detach),
	DEVMETHOD(device_shutdown,	em_shutdown),
	DEVMETHOD(device_suspend,	em_suspend),
	DEVMETHOD(device_resume,	em_resume),
	DEVMETHOD_END
};

static driver_t em_driver = {
	"em",
	em_methods,
	sizeof(struct adapter),
};

static devclass_t em_devclass;

DECLARE_DUMMY_MODULE(if_em);
MODULE_DEPEND(em, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_em, pci, em_driver, em_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	em_int_throttle_ceil = EM_DEFAULT_ITR;
static int	em_rxd = EM_DEFAULT_RXD;
static int	em_txd = EM_DEFAULT_TXD;
static int	em_smart_pwr_down = 0;

/* Controls whether promiscuous also shows bad packets */
static int	em_debug_sbp = FALSE;

static int	em_82573_workaround = 1;
static int	em_msi_enable = 1;

static char	em_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;
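/*
 * Illustrative note (not from the original source): the tunables above
 * are exported through the TUNABLE_INT()/TUNABLE_STR() hooks below, so
 * they can be preset from loader.conf(5) before the driver attaches.
 * A hypothetical example, with the names taken from the registrations
 * below and the values chosen purely for illustration:
 *
 *	hw.em.rxd=512
 *	hw.em.txd=512
 *	hw.em.int_throttle_ceil=10000
 *	hw.em.flow_ctrl=none
 */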

TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround);
TUNABLE_INT("hw.em.msi.enable", &em_msi_enable);
TUNABLE_STR("hw.em.flow_ctrl", em_flowctrl, sizeof(em_flowctrl));

/* Global used in WOL setup with multiport cards */
static int	em_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	em_display_debug_stats = 0;

#if !defined(KTR_IF_EM)
#define KTR_IF_EM	KTR_ALL
#endif
KTR_INFO_MASTER(if_em);
KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_em_ ## name)

static __inline void
em_tx_intr(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	em_txeof(adapter);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static __inline void
em_free_txbuffer(struct adapter *adapter, struct em_buffer *tx_buffer)
{

	KKASSERT(tx_buffer->m_head != NULL);
	KKASSERT(adapter->tx_nmbuf > 0);
	adapter->tx_nmbuf--;

	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	m_freem(tx_buffer->m_head);
	tx_buffer->m_head = NULL;
}

static __inline void
em_try_txgc(struct adapter *adapter, int dec)
{

	if (adapter->tx_running > 0) {
		adapter->tx_running -= dec;
		if (adapter->tx_running <= 0 && adapter->tx_nmbuf &&
		    adapter->num_tx_desc_avail < adapter->num_tx_desc &&
		    adapter->num_tx_desc_avail + adapter->tx_int_nsegs >
		    adapter->num_tx_desc)
			em_tx_collect(adapter, TRUE);
	}
}

static void
em_txgc_timer(void *xadapter)
{
	struct adapter *adapter = xadapter;
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP))
		return;

	if (!lwkt_serialize_try(ifp->if_serializer))
		goto done;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP)) {
		lwkt_serialize_exit(ifp->if_serializer);
		return;
	}
	em_try_txgc(adapter, EM_TX_RUNNING_DEC);

	lwkt_serialize_exit(ifp->if_serializer);
done:
	callout_reset(&adapter->tx_gc_timer, 1, em_txgc_timer, adapter);
}

static int
em_probe(device_t dev)
{
	const struct em_vendor_info *ent;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (ent = em_vendor_info_array; ent->desc != NULL; ++ent) {
		if (vid == ent->vendor_id && did == ent->device_id) {
			device_set_desc(dev, ent->desc);
			device_set_async_attach(dev, TRUE);
			return (ent->ret);
		}
	}
	return (ENXIO);
}

static int
em_attach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	int tsize, rsize;
	int error = 0;
	int cap;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
	char flowctrl[IFM_ETH_FC_STRLEN];

	adapter->dev = adapter->osdep.dev = dev;

	/*
	 * Some versions of I219 only have PCI AF.
	 */
	if (pci_is_pcie(dev) || pci_find_extcap(dev, PCIY_PCIAF, &cap) == 0)
		adapter->flags |= EM_FLAG_GEN2;

	callout_init_mp(&adapter->timer);
	callout_init_mp(&adapter->tx_fifo_timer);
	callout_init_mp(&adapter->tx_gc_timer);

	ifmedia_init(&adapter->media, IFM_IMASK | IFM_ETH_FCMASK,
	    em_media_change, em_media_status);

	/* Determine hardware and mac info */
	error = em_get_hw_info(adapter);
	if (error) {
		device_printf(dev, "Identify hardware failed\n");
		goto fail;
	}

	/* Setup PCI resources */
	error = em_alloc_pci_res(adapter);
	if (error) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		goto fail;
	}

	/*
	 * For ICH8 and family we need to map the flash memory,
	 * and this must happen after the MAC is identified.
	 *
	 * (SPT does not map the flash with a separate BAR)
	 */
	if (adapter->hw.mac.type == e1000_ich8lan ||
	    adapter->hw.mac.type == e1000_ich9lan ||
	    adapter->hw.mac.type == e1000_ich10lan ||
	    adapter->hw.mac.type == e1000_pchlan ||
	    adapter->hw.mac.type == e1000_pch2lan ||
	    adapter->hw.mac.type == e1000_pch_lpt) {
		adapter->flash_rid = EM_BAR_FLASH;

		adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &adapter->flash_rid, RF_ACTIVE);
		if (adapter->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		adapter->osdep.flash_bus_space_tag =
		    rman_get_bustag(adapter->flash);
		adapter->osdep.flash_bus_space_handle =
		    rman_get_bushandle(adapter->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		adapter->hw.flash_address = (uint8_t *)adapter->flash;
	} else if (adapter->hw.mac.type == e1000_pch_spt) {
		/*
		 * In the new SPT device flash is not a separate BAR,
		 * rather it is also in BAR0, so use the same tag and
		 * an offset handle for the FLASH read/write macros
		 * in the shared code.
		 */
		adapter->osdep.flash_bus_space_tag =
		    adapter->osdep.mem_bus_space_tag;
		adapter->osdep.flash_bus_space_handle =
		    adapter->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR;
	}

	switch (adapter->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_pch_lpt:
	case e1000_pch_spt:
		/*
		 * Pullup extra 4bytes into the first data segment for
		 * TSO, see:
		 *   82571/82572 specification update errata #7
		 *
		 * Same applies to I217 (and maybe I218 and I219).
		 *
		 * NOTE:
		 * 4bytes instead of 2bytes, which are mentioned in the
		 * errata, are pulled; mainly to keep rest of the data
		 * properly aligned.
		 */
		adapter->flags |= EM_FLAG_TSO_PULLEX;
		/* FALL THROUGH */

	default:
		if (adapter->flags & EM_FLAG_GEN2)
			adapter->flags |= EM_FLAG_TSO;
		break;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}

	e1000_get_bus_info(&adapter->hw);

	/*
	 * Validate number of transmit and receive descriptors.  It
	 * must not exceed hardware maximum, and must be a multiple
	 * of E1000_DBA_ALIGN.
	 */
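	/*
	 * A worked example of the check below (descriptor and alignment
	 * sizes assumed from the e1000 headers, not restated here):
	 * legacy TX/RX descriptors are 16 bytes and EM_DBA_ALIGN is 128,
	 * so the ring size is properly aligned whenever the descriptor
	 * count is a multiple of 128 / 16 = 8.  E.g. em_txd = 256 gives
	 * 256 * 16 = 4096 bytes and passes, while em_txd = 100 gives
	 * 1600 bytes and would be replaced with a default count.
	 */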
	if ((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
	    em_txd < EM_MIN_TXD) {
		if (adapter->hw.mac.type < e1000_82544)
			adapter->num_tx_desc = EM_MAX_TXD_82543;
		else
			adapter->num_tx_desc = EM_DEFAULT_TXD;
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    adapter->num_tx_desc, em_txd);
	} else {
		adapter->num_tx_desc = em_txd;
	}
	if ((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    em_rxd < EM_MIN_RXD) {
		if (adapter->hw.mac.type < e1000_82544)
			adapter->num_rx_desc = EM_MAX_RXD_82543;
		else
			adapter->num_rx_desc = EM_DEFAULT_RXD;
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    adapter->num_rx_desc, em_rxd);
	} else {
		adapter->num_rx_desc = em_rxd;
	}

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = MCLBYTES;

	/*
	 * Interrupt throttle rate
	 */
	if (em_int_throttle_ceil == 0) {
		adapter->int_throttle_ceil = 0;
	} else {
		int throttle = em_int_throttle_ceil;

		if (throttle < 0)
			throttle = EM_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EM_DEFAULT_ITR;

		adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
	}

	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	adapter->hw.mac.max_frame_size =
	    ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETHER_CRC_LEN;
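
	/*
	 * Worked example for the interrupt throttle computation above
	 * (pure arithmetic from the code; the default ceiling comes from
	 * EM_DEFAULT_ITR): with hw.em.int_throttle_ceil = 10000 ints/s,
	 * the intermediate register value is 1000000000 / 256 / 10000 =
	 * 390 (in 256ns units), and int_throttle_ceil is then recomputed
	 * as 1000000000 / 256 / 390 = 10016, i.e. the exact interrupt
	 * frequency the hardware will actually deliver.
	 */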

	/* This controls when hardware reports transmit completion status. */
	adapter->hw.mac.report_tx_early = 1;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &adapter->parent_dtag);
	if (error) {
		device_printf(dev, "could not create top level DMA tag\n");
		goto fail;
	}

	/*
	 * Allocate Transmit Descriptor ring
	 */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EM_DBA_ALIGN);
	error = em_dma_malloc(adapter, tsize, &adapter->txdma);
	if (error) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		goto fail;
	}
	adapter->tx_desc_base = adapter->txdma.dma_vaddr;

	/*
	 * Allocate Receive Descriptor ring
	 */
	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
	    EM_DBA_ALIGN);
	error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
	if (error) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		goto fail;
	}
	adapter->rx_desc_base = adapter->rxdma.dma_vaddr;

	/* Allocate multicast array memory. */
	adapter->mta = kmalloc(ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE */
	adapter->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state, this is important in reading the
	 * nvm and mac from that.
	 */
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!em_is_valid_eaddr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&adapter->hw, TRUE);

	/* Allocate transmit descriptors and buffers */
	error = em_create_tx_ring(adapter);
	if (error) {
		device_printf(dev, "Could not setup transmit structures\n");
		goto fail;
	}

	/* Allocate receive descriptors and buffers */
	error = em_create_rx_ring(adapter);
	if (error) {
		device_printf(dev, "Could not setup receive structures\n");
		goto fail;
	}

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= EM_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EM_EEPROM_APME;
	eeprom_data = 0;
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		break;

	case e1000_82573:
	case e1000_82583:
		adapter->flags |= EM_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
		apme_mask = E1000_WUC_APME;
		adapter->flags |= EM_FLAG_HAS_AMT;
		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
		break;

	default:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		adapter->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;

	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;

	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port adapter, disable WoL on all but port A */
		if (em_global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++em_global_quad_port_a == 4)
			em_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	adapter->wol = 0;
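
	/*
	 * Descriptive note (added, grounded in the code itself): with
	 * adapter->wol forced to 0 above, the EEPROM/register derived
	 * WOL configuration is effectively discarded; em_detach() and
	 * em_suspend() only arm WUC/WUFC when adapter->wol is non-zero,
	 * so wake-on-lan stays disabled until the "XXX disable wol"
	 * override is removed.
	 */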

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    em_flowctrl);
	adapter->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);
	if (adapter->hw.mac.type == e1000_pchlan) {
		/* Only PAUSE reception is supported on PCH */
		adapter->ifm_flowctrl &= ~IFM_ETH_TXPAUSE;
	}

	/* Setup OS specific network interface */
	em_setup_ifp(adapter);

	/* Add sysctl tree, must be after em_setup_ifp() */
	em_add_sysctl(adapter);

#ifdef IFPOLL_ENABLE
	/* Polling setup */
	ifpoll_compat_setup(&adapter->npoll,
	    device_get_sysctl_ctx(dev), device_get_sysctl_tree(dev),
	    device_get_unit(dev), ifp->if_serializer);
#endif

	/* Reset the hardware */
	error = em_reset(adapter);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset, call it again,
		 * if it fails a second time it's a real issue.
		 */
		error = em_reset(adapter);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(ifp);
			goto fail;
		}
	}

	/* Initialize statistics */
	em_update_stats(adapter);

	adapter->hw.mac.get_link_status = 1;
	em_update_link_status(adapter);

	/* Do we need workaround for 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	if (adapter->pcix_82544) {
		/*
		 * 82544 on PCI-X may split one TX segment
		 * into two TX descs, so we double its number
		 * of spare TX desc here.
		 */
		adapter->spare_tx_desc = 2 * EM_TX_SPARE;
	} else {
		adapter->spare_tx_desc = EM_TX_SPARE;
	}
	if (adapter->flags & EM_FLAG_TSO)
		adapter->spare_tx_desc = EM_TX_SPARE_TSO;
	adapter->tx_wreg_nsegs = EM_DEFAULT_TXWREG;

	/*
	 * Keep following relationship between spare_tx_desc, oact_tx_desc
	 * and tx_int_nsegs:
	 * (spare_tx_desc + EM_TX_RESERVED) <=
	 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs
	 */
	adapter->oact_tx_desc = adapter->num_tx_desc / 8;
	if (adapter->oact_tx_desc > EM_TX_OACTIVE_MAX)
		adapter->oact_tx_desc = EM_TX_OACTIVE_MAX;
	if (adapter->oact_tx_desc < adapter->spare_tx_desc + EM_TX_RESERVED)
		adapter->oact_tx_desc = adapter->spare_tx_desc + EM_TX_RESERVED;

	adapter->tx_int_nsegs = adapter->num_tx_desc / 16;
	if (adapter->tx_int_nsegs < adapter->oact_tx_desc)
		adapter->tx_int_nsegs = adapter->oact_tx_desc;

	/* Non-AMT based hardware can now take control from firmware */
	if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
	    EM_FLAG_HAS_MGMT && adapter->hw.mac.type >= e1000_82571)
		em_get_hw_control(adapter);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(adapter->intr_res));

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 * 82583 specification update errata #4
	 */
	intr_func = em_intr;
	if ((adapter->flags & EM_FLAG_SHARED_INTR) &&
	    (adapter->hw.mac.type == e1000_82571 ||
	     adapter->hw.mac.type == e1000_82572 ||
	     adapter->hw.mac.type == e1000_82573 ||
	     adapter->hw.mac.type == e1000_82574 ||
	     adapter->hw.mac.type == e1000_82583))
		intr_func = em_intr_mask;

	error = bus_setup_intr(dev, adapter->intr_res, INTR_MPSAFE,
	    intr_func, adapter, &adapter->intr_tag,
	    ifp->if_serializer);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(ifp);
		goto fail;
	}
	return (0);
fail:
	em_detach(dev);
	return (error);
}

static int
em_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &adapter->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		em_stop(adapter);

		e1000_phy_hw_reset(&adapter->hw);

		em_rel_mgmt(adapter);
		em_rel_hw_control(adapter);

		if (adapter->wol) {
			E1000_WRITE_REG(&adapter->hw, E1000_WUC,
			    E1000_WUC_PME_EN);
			E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
			em_enable_wol(dev);
		}

		bus_teardown_intr(dev, adapter->intr_res, adapter->intr_tag);

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	} else if (adapter->memory != NULL) {
		em_rel_hw_control(adapter);
	}

	ifmedia_removeall(&adapter->media);
	bus_generic_detach(dev);

	em_free_pci_res(adapter);

	em_destroy_tx_ring(adapter, adapter->num_tx_desc);
	em_destroy_rx_ring(adapter, adapter->num_rx_desc);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base)
		em_dma_free(adapter, &adapter->txdma);

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base)
		em_dma_free(adapter, &adapter->rxdma);

	/* Free top level busdma tag */
	if (adapter->parent_dtag != NULL)
		bus_dma_tag_destroy(adapter->parent_dtag);

	if (adapter->mta != NULL)
		kfree(adapter->mta, M_DEVBUF);

	return (0);
}

static int
em_shutdown(device_t dev)
{
	return em_suspend(dev);
}

static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	em_stop(adapter);

	em_rel_mgmt(adapter);
	em_rel_hw_control(adapter);

	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wol(dev);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_suspend(dev);
}

static int
em_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (adapter->hw.mac.type == e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);

	em_init(adapter);
	em_get_mgmt(adapter);
	if_devstart(ifp);

	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_resume(dev);
}

static void
em_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct adapter *adapter = ifp->if_softc;
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	if (!adapter->link_active) {
		ifq_purge(&ifp->if_snd);
		return;
	}
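
	/*
	 * Descriptive note (added): the dequeue/encap loop below batches
	 * tail (TDT) register updates.  The hardware doorbell is only
	 * written once at least tx_wreg_nsegs descriptors have
	 * accumulated, plus once more after the loop for any remainder,
	 * which trims MMIO writes on the transmit hot path.
	 */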
	while (!ifq_is_empty(&ifp->if_snd)) {
		/* Do we at least have the minimal number of free TX descriptors? */
		if (EM_IS_OACTIVE(adapter)) {
			em_tx_collect(adapter, FALSE);
			if (EM_IS_OACTIVE(adapter)) {
				ifq_set_oactive(&ifp->if_snd);
				adapter->no_tx_desc_avail1++;
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (em_encap(adapter, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			em_tx_collect(adapter, FALSE);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time will make the opackets
		 * statistics vastly inaccurate; we do the opackets increment
		 * now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= adapter->tx_wreg_nsegs && idx >= 0) {
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifp->if_timer = EM_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), idx);
	adapter->tx_running = EM_TX_RUNNING;
}

static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (command) {
	case SIOCSIFMTU:
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_ich10lan:
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_82574:
		case e1000_82583:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		case e1000_pchlan:
			max_frame_size = 4096;
			break;

		/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->hw.mac.max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			em_init(adapter);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else {
				em_init(adapter);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			em_stop(adapter);
		}
		adapter->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac.type == e1000_82542 &&
			    adapter->hw.revision_id == E1000_REVISION_2)
				em_init_rx_unit(adapter);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				em_enable_intr(adapter);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&adapter->hw)) {
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EM_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EM_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			em_init(adapter);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */

	if (E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call if_devstart() here.
		 */
		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
	    E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}

	if (e1000_check_for_link(&adapter->hw) == 0)
		if_printf(ifp, "watchdog timeout -- resetting\n");

	IFNET_STAT_INC(ifp, oerrors, 1);
	adapter->watchdog_events++;

	em_init(adapter);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
em_init(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	device_t dev = adapter->dev;

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_stop(adapter);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Reset the hardware */
	if (em_reset(adapter)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX em_stop()? */
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	em_get_mgmt(adapter);

	/* Prepare transmit descriptors and buffers */
	em_init_tx_ring(adapter);
	em_init_tx_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_init_rx_ring(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		em_stop(adapter);
		return;
	}
	em_init_rx_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	/* Reset hardware counters */
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}
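
	/*
	 * Decoding the IVAR magic number above, as a sketch based on the
	 * nibble layout the comment describes (consult the 82574
	 * datasheet before relying on this): 0x800A0908 sets bits 3:0 to
	 * 0x8 (valid | vector 0 for RXQ0), bits 11:8 to 0x9 (valid |
	 * vector 1 for TXQ0) and bits 19:16 to 0xA (valid | vector 2 for
	 * Link/other); the remaining set bit is left exactly as the
	 * constant programs it.
	 */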
#ifdef IFPOLL_ENABLE
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_NPOLLING)
		em_disable_intr(adapter);
	else
#endif	/* IFPOLL_ENABLE */
		em_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
	    (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT) &&
	    adapter->hw.mac.type >= e1000_82571)
		em_get_hw_control(adapter);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

#ifdef IFPOLL_ENABLE
	if ((ifp->if_flags & IFF_NPOLLING) == 0)
#endif
	{
		callout_reset_bycpu(&adapter->tx_gc_timer, 1,
		    em_txgc_timer, adapter,
		    rman_get_cpuid(adapter->intr_res));
	}
	callout_reset(&adapter->timer, hz, em_timer, adapter);
}

#ifdef IFPOLL_ENABLE

static void
em_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (adapter->npoll.ifpc_stcount-- == 0) {
		uint32_t reg_icr;

		adapter->npoll.ifpc_stcount = adapter->npoll.ifpc_stfrac;

		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_timer, adapter);
		}
	}

	em_rxeof(adapter, count);

	em_tx_intr(adapter);
	em_try_txgc(adapter, 1);
}

static void
em_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (info != NULL) {
		int cpuid = adapter->npoll.ifpc_cpuid;

		info->ifpi_rx[cpuid].poll_func = em_npoll_compat;
		info->ifpi_rx[cpuid].arg = NULL;
		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;

		ifq_set_cpuid(&ifp->if_snd, cpuid);
	} else {
		ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(adapter->intr_res));
	}
	if (ifp->if_flags & IFF_RUNNING)
		em_init(adapter);
}

#endif	/* IFPOLL_ENABLE */

static void
em_intr(void *xsc)
{
	em_intr_body(xsc, TRUE);
}

static void
em_intr_body(struct adapter *adapter, boolean_t chk_asserted)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(ifp->if_serializer);

	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	if (chk_asserted &&
	    ((adapter->hw.mac.type >= e1000_82571 &&
	      (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
	     reg_icr == 0)) {
		logif(intr_end);
		return;
	}
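
	/*
	 * NB (added note): on this hardware family the ICR register is
	 * clear-on-read, so the single read above both snapshots and
	 * acknowledges the pending interrupt causes; everything below
	 * dispatches from that snapshot rather than re-reading the
	 * register.
	 */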
	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on em(4) when in the resume cycle.  The ICR register
	 * reports all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO))
			em_rxeof(adapter, -1);
		if (reg_icr & E1000_ICR_TXDW)
			em_tx_intr(adapter);
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		em_update_link_status(adapter);

		/* Deal with TX cruft when link lost */
		em_tx_purge(adapter);

		callout_reset(&adapter->timer, hz, em_timer, adapter);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	logif(intr_end);
}

static void
em_intr_mask(void *xsc)
{
	struct adapter *adapter = xsc;

	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	em_intr_body(adapter, FALSE);
	E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		if (adapter->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active = adapter->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (adapter->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= adapter->ifm_flowctrl;

	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		u_char fiber_type = IFM_1000_SX;

		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	if (ifmr->ifm_active & IFM_FDX) {
		ifmr->ifm_active |=
		    e1000_fc2ifmedia(adapter->hw.fc.current_mode);
	}
}

static int
em_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (adapter->hw.mac.type == e1000_pchlan &&
	    (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)) {
		if (bootverbose)
			if_printf(ifp, "TX PAUSE is not supported on PCH\n");
		return EINVAL;
	}

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	adapter->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		em_init(adapter);

	return (0);
}

static int
em_encap(struct adapter *adapter, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EM_MAX_SCATTER];
	bus_dmamap_t map;
	struct em_buffer *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, txd_used, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = em_tso_pullup(adapter, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;
	txd_used = 0;
	/*
	 * Capture the first descriptor index, this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = adapter->num_tx_desc_avail - EM_TX_RESERVED;
	KASSERT(maxsegs >= adapter->spare_tx_desc,
	    ("not enough spare TX desc"));
	if (adapter->pcix_82544) {
		/* Half it; see the comment in em_attach() */
		maxsegs >>= 1;
	}
	if (maxsegs > EM_MAX_SCATTER)
		maxsegs = EM_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(adapter->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			adapter->mbuf_alloc_failed++;
		else
			adapter->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	adapter->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
		adapter->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EM_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = em_txcsum(adapter, m_head, &txd_upper, &txd_lower);
		adapter->tx_nsegs += i;
		*segs_used += i;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

	i = adapter->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if (adapter->pcix_82544) {
			DESC_ARRAY desc_array;
			uint32_t array_elements, counter;

			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_82544_fill_desc(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				KKASSERT(txd_used < adapter->num_tx_desc_avail);

				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];

				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    E1000_TXD_CMD_IFCS | txd_lower |
				    desc_array.descriptor[counter].length);
				ctxd->upper.data = htole32(txd_upper);

				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;

				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];

			ctxd->buffer_addr = htole64(segs[j].ds_addr);
			ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
			    txd_lower | segs[j].ds_len);
			ctxd->upper.data = htole32(txd_upper);

			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		KKASSERT(adapter->num_tx_desc_avail > txd_used);
		adapter->num_tx_desc_avail -= txd_used;
	} else {
		KKASSERT(adapter->num_tx_desc_avail > nsegs);
		adapter->num_tx_desc_avail -= nsegs;
	}
	adapter->tx_nmbuf++;

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
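
	/*
	 * Descriptive note (added): the block below implements TX
	 * interrupt moderation.  Instead of requesting a writeback on
	 * every packet, the RS bit is set only once every tx_int_nsegs
	 * descriptors, and the descriptor that will be written back is
	 * recorded in the tx_dd[] ring so the cleanup path can check
	 * the DD status of just those entries.
	 */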
adapter->tx_nsegs = 0; 1914 1915 /* 1916 * Report Status (RS) is turned on 1917 * every tx_int_nsegs descriptors. 1918 */ 1919 cmd = E1000_TXD_CMD_RS; 1920 1921 /* 1922 * Keep track of the descriptor, which will 1923 * be written back by hardware. 1924 */ 1925 adapter->tx_dd[adapter->tx_dd_tail] = last; 1926 EM_INC_TXDD_IDX(adapter->tx_dd_tail); 1927 KKASSERT(adapter->tx_dd_tail != adapter->tx_dd_head); 1928 } 1929 1930 /* 1931 * Last Descriptor of Packet needs End Of Packet (EOP) 1932 */ 1933 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd); 1934 1935 if (adapter->hw.mac.type == e1000_82547) { 1936 /* 1937 * Advance the Transmit Descriptor Tail (TDT), this tells the 1938 * E1000 that this frame is available to transmit. 1939 */ 1940 if (adapter->link_duplex == HALF_DUPLEX) { 1941 em_82547_move_tail_serialized(adapter); 1942 } else { 1943 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i); 1944 em_82547_update_fifo_head(adapter, 1945 m_head->m_pkthdr.len); 1946 } 1947 } else { 1948 /* 1949 * Defer TDT updating, until enough descriptors are setup 1950 */ 1951 *idx = i; 1952 } 1953 return (0); 1954 } 1955 1956 /* 1957 * 82547 workaround to avoid controller hang in half-duplex environment. 1958 * The workaround is to avoid queuing a large packet that would span 1959 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers 1960 * in this case. We do that only when FIFO is quiescent. 1961 */ 1962 static void 1963 em_82547_move_tail_serialized(struct adapter *adapter) 1964 { 1965 struct e1000_tx_desc *tx_desc; 1966 uint16_t hw_tdt, sw_tdt, length = 0; 1967 bool eop = 0; 1968 1969 ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer); 1970 1971 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0)); 1972 sw_tdt = adapter->next_avail_tx_desc; 1973 1974 while (hw_tdt != sw_tdt) { 1975 tx_desc = &adapter->tx_desc_base[hw_tdt]; 1976 length += tx_desc->lower.flags.length; 1977 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP; 1978 if (++hw_tdt == adapter->num_tx_desc) 1979 hw_tdt = 0; 1980 1981 if (eop) { 1982 if (em_82547_fifo_workaround(adapter, length)) { 1983 adapter->tx_fifo_wrk_cnt++; 1984 callout_reset(&adapter->tx_fifo_timer, 1, 1985 em_82547_move_tail, adapter); 1986 break; 1987 } 1988 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt); 1989 em_82547_update_fifo_head(adapter, length); 1990 length = 0; 1991 } 1992 } 1993 } 1994 1995 static void 1996 em_82547_move_tail(void *xsc) 1997 { 1998 struct adapter *adapter = xsc; 1999 struct ifnet *ifp = &adapter->arpcom.ac_if; 2000 2001 lwkt_serialize_enter(ifp->if_serializer); 2002 em_82547_move_tail_serialized(adapter); 2003 lwkt_serialize_exit(ifp->if_serializer); 2004 } 2005 2006 static int 2007 em_82547_fifo_workaround(struct adapter *adapter, int len) 2008 { 2009 int fifo_space, fifo_pkt_len; 2010 2011 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR); 2012 2013 if (adapter->link_duplex == HALF_DUPLEX) { 2014 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 2015 2016 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) { 2017 if (em_82547_tx_fifo_reset(adapter)) 2018 return (0); 2019 else 2020 return (1); 2021 } 2022 } 2023 return (0); 2024 } 2025 2026 static void 2027 em_82547_update_fifo_head(struct adapter *adapter, int len) 2028 { 2029 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR); 2030 2031 /* tx_fifo_head is always 16 byte aligned */ 2032 adapter->tx_fifo_head += fifo_pkt_len; 2033 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 2034 adapter->tx_fifo_head -= adapter->tx_fifo_size; 2035 } 
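/*
 * Illustrative sketch only (not part of the driver): the arithmetic used
 * by the 82547 FIFO accounting above. A frame is padded by EM_FIFO_HDR
 * and rounded up to the next EM_FIFO_HDR boundary before it is charged
 * against the FIFO, and the head wraps modulo the FIFO size, mirroring
 * em_82547_update_fifo_head(). The function name is hypothetical.
 */
#if 0
static int
em_82547_fifo_example(int head, int fifo_size, int pkt_len)
{
	int fifo_pkt_len = roundup2(pkt_len + EM_FIFO_HDR, EM_FIFO_HDR);

	/*
	 * e.g. assuming EM_FIFO_HDR == 0x10, a 1514-byte frame costs
	 * roundup2(1530, 16) == 1536 bytes of FIFO space.
	 */
	head += fifo_pkt_len;
	if (head >= fifo_size)
		head -= fifo_size;
	return (head);
}
#endif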
2036 2037 static int 2038 em_82547_tx_fifo_reset(struct adapter *adapter) 2039 { 2040 uint32_t tctl; 2041 2042 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) == 2043 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) && 2044 (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 2045 E1000_READ_REG(&adapter->hw, E1000_TDFH)) && 2046 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) == 2047 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) && 2048 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) { 2049 /* Disable TX unit */ 2050 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); 2051 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, 2052 tctl & ~E1000_TCTL_EN); 2053 2054 /* Reset FIFO pointers */ 2055 E1000_WRITE_REG(&adapter->hw, E1000_TDFT, 2056 adapter->tx_head_addr); 2057 E1000_WRITE_REG(&adapter->hw, E1000_TDFH, 2058 adapter->tx_head_addr); 2059 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS, 2060 adapter->tx_head_addr); 2061 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS, 2062 adapter->tx_head_addr); 2063 2064 /* Re-enable TX unit */ 2065 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); 2066 E1000_WRITE_FLUSH(&adapter->hw); 2067 2068 adapter->tx_fifo_head = 0; 2069 adapter->tx_fifo_reset_cnt++; 2070 2071 return (TRUE); 2072 } else { 2073 return (FALSE); 2074 } 2075 } 2076 2077 static void 2078 em_set_promisc(struct adapter *adapter) 2079 { 2080 struct ifnet *ifp = &adapter->arpcom.ac_if; 2081 uint32_t reg_rctl; 2082 2083 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2084 2085 if (ifp->if_flags & IFF_PROMISC) { 2086 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2087 /* Turn this on if you want to see bad packets */ 2088 if (em_debug_sbp) 2089 reg_rctl |= E1000_RCTL_SBP; 2090 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2091 } else if (ifp->if_flags & IFF_ALLMULTI) { 2092 reg_rctl |= E1000_RCTL_MPE; 2093 reg_rctl &= ~E1000_RCTL_UPE; 2094 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2095 } 2096 } 2097 2098 static void 2099 em_disable_promisc(struct adapter *adapter) 2100 { 2101 struct ifnet *ifp = &adapter->arpcom.ac_if; 2102 uint32_t reg_rctl; 2103 int mcnt = 0; 2104 2105 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2106 reg_rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP); 2107 2108 if (ifp->if_flags & IFF_ALLMULTI) { 2109 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2110 } else { 2111 const struct ifmultiaddr *ifma; 2112 2113 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2114 if (ifma->ifma_addr->sa_family != AF_LINK) 2115 continue; 2116 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 2117 break; 2118 mcnt++; 2119 } 2120 } 2121 /* Don't disable if in MAX groups */ 2122 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2123 reg_rctl &= ~E1000_RCTL_MPE; 2124 2125 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2126 } 2127 2128 static void 2129 em_set_multi(struct adapter *adapter) 2130 { 2131 struct ifnet *ifp = &adapter->arpcom.ac_if; 2132 struct ifmultiaddr *ifma; 2133 uint32_t reg_rctl = 0; 2134 uint8_t *mta; 2135 int mcnt = 0; 2136 2137 mta = adapter->mta; 2138 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 2139 2140 if (adapter->hw.mac.type == e1000_82542 && 2141 adapter->hw.revision_id == E1000_REVISION_2) { 2142 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2143 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2144 e1000_pci_clear_mwi(&adapter->hw); 2145 reg_rctl |= E1000_RCTL_RST; 2146 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2147 msec_delay(5); 2148 } 2149 2150 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2151 if (ifma->ifma_addr->sa_family != AF_LINK) 2152 continue; 
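		/*
		 * Collect at most MAX_NUM_MULTICAST_ADDRESSES entries;
		 * once the table would overflow, em_set_multi() falls
		 * back to multicast-promiscuous mode below.
		 */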
2153 2154 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 2155 break; 2156 2157 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2158 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 2159 mcnt++; 2160 } 2161 2162 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 2163 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2164 reg_rctl |= E1000_RCTL_MPE; 2165 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2166 } else { 2167 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); 2168 } 2169 2170 if (adapter->hw.mac.type == e1000_82542 && 2171 adapter->hw.revision_id == E1000_REVISION_2) { 2172 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2173 reg_rctl &= ~E1000_RCTL_RST; 2174 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2175 msec_delay(5); 2176 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2177 e1000_pci_set_mwi(&adapter->hw); 2178 } 2179 } 2180 2181 /* 2182 * This routine checks for link status and updates statistics. 2183 */ 2184 static void 2185 em_timer(void *xsc) 2186 { 2187 struct adapter *adapter = xsc; 2188 struct ifnet *ifp = &adapter->arpcom.ac_if; 2189 2190 lwkt_serialize_enter(ifp->if_serializer); 2191 2192 em_update_link_status(adapter); 2193 em_update_stats(adapter); 2194 2195 /* Reset LAA into RAR[0] on 82571 */ 2196 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE) 2197 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 2198 2199 if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 2200 em_print_hw_stats(adapter); 2201 2202 em_smartspeed(adapter); 2203 2204 callout_reset(&adapter->timer, hz, em_timer, adapter); 2205 2206 lwkt_serialize_exit(ifp->if_serializer); 2207 } 2208 2209 static void 2210 em_update_link_status(struct adapter *adapter) 2211 { 2212 struct e1000_hw *hw = &adapter->hw; 2213 struct ifnet *ifp = &adapter->arpcom.ac_if; 2214 device_t dev = adapter->dev; 2215 uint32_t link_check = 0; 2216 2217 /* Get the cached link value or read phy for real */ 2218 switch (hw->phy.media_type) { 2219 case e1000_media_type_copper: 2220 if (hw->mac.get_link_status) { 2221 if (hw->mac.type == e1000_pch_spt) 2222 msec_delay(50); 2223 /* Do the work to read phy */ 2224 e1000_check_for_link(hw); 2225 link_check = !hw->mac.get_link_status; 2226 if (link_check) /* ESB2 fix */ 2227 e1000_cfg_on_link_up(hw); 2228 } else { 2229 link_check = TRUE; 2230 } 2231 break; 2232 2233 case e1000_media_type_fiber: 2234 e1000_check_for_link(hw); 2235 link_check = 2236 E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 2237 break; 2238 2239 case e1000_media_type_internal_serdes: 2240 e1000_check_for_link(hw); 2241 link_check = adapter->hw.mac.serdes_has_link; 2242 break; 2243 2244 case e1000_media_type_unknown: 2245 default: 2246 break; 2247 } 2248 2249 /* Now check for a transition */ 2250 if (link_check && adapter->link_active == 0) { 2251 e1000_get_speed_and_duplex(hw, &adapter->link_speed, 2252 &adapter->link_duplex); 2253 2254 /* 2255 * Check if we should enable/disable SPEED_MODE bit on 2256 * 82571/82572 2257 */ 2258 if (adapter->link_speed != SPEED_1000 && 2259 (hw->mac.type == e1000_82571 || 2260 hw->mac.type == e1000_82572)) { 2261 int tarc0; 2262 2263 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 2264 tarc0 &= ~TARC_SPEED_MODE_BIT; 2265 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 2266 } 2267 if (bootverbose) { 2268 char flowctrl[IFM_ETH_FC_STRLEN]; 2269 2270 e1000_fc2str(hw->fc.current_mode, flowctrl, 2271 sizeof(flowctrl)); 2272 device_printf(dev, "Link is up %d Mbps %s, " 2273 "Flow control: %s\n", 2274 adapter->link_speed, 2275 (adapter->link_duplex == 
		    FULL_DUPLEX) ?
		    "Full Duplex" : "Half Duplex",
		    flowctrl);
		}
		if (adapter->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
			e1000_force_flowctrl(hw, adapter->ifm_flowctrl);
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && adapter->link_active == 1) {
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
#if 0
		/* Link down, disable watchdog */
		ifp->if_timer = 0;
#endif
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
em_stop(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_disable_intr(adapter);

	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	adapter->tx_running = 0;
	callout_stop(&adapter->tx_gc_timer);

	/* I219 needs some special flushing to avoid hangs */
	if (adapter->hw.mac.type == e1000_pch_spt)
		em_flush_txrx_ring(adapter);

	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac.type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);

	for (i = 0; i < adapter->num_tx_desc; i++) {
		struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i];

		if (tx_buffer->m_head != NULL)
			em_free_txbuffer(adapter, tx_buffer);
	}

	for (i = 0; i < adapter->num_rx_desc; i++) {
		struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i];

		if (rx_buffer->m_head != NULL) {
			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
			m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}

	if (adapter->fmp != NULL)
		m_freem(adapter->fmp);
	adapter->fmp = NULL;
	adapter->lmp = NULL;

	adapter->csum_flags = 0;
	adapter->csum_lhlen = 0;
	adapter->csum_iphlen = 0;
	adapter->csum_thlen = 0;
	adapter->csum_mss = 0;
	adapter->csum_pktlen = 0;

	adapter->tx_dd_head = 0;
	adapter->tx_dd_tail = 0;
	adapter->tx_nsegs = 0;
}

static int
em_get_hw_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_get_revid(dev);
	adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	adapter->hw.subsystem_device_id = pci_get_subdevice(dev);

	/* Do Shared Code Init and Setup */
	if (e1000_set_mac_type(&adapter->hw))
		return ENXIO;
	return 0;
}

static int
em_alloc_pci_res(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u_int intr_flags;
	int val, rid, msi_enable;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	adapter->memory_rid = EM_BAR_MEM;
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &adapter->memory_rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);

	/* XXX This is quite goofy; it is not actually used */
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;

	/* Only older adapters use IO mapping */
	if (adapter->hw.mac.type > e1000_82543 &&
	    adapter->hw.mac.type < e1000_82571) {
		/* Figure out where our IO BAR is */
		for (rid = PCIR_BAR(0); rid < PCIR_CARDBUSCIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CARDBUSCIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
		    &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on PCI/PCI-X chips, see:
	 * 82540 specification update errata #6
	 * 82545 specification update errata #4
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = em_msi_enable;
	if (msi_enable &&
	    ((adapter->flags & EM_FLAG_GEN2) == 0 ||
	     adapter->hw.mac.type == e1000_82571 ||
	     adapter->hw.mac.type == e1000_82572))
		msi_enable = 0;
again:
	adapter->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &adapter->intr_rid, &intr_flags);

	if (adapter->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			adapter->flags |= EM_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	adapter->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &adapter->intr_rid, intr_flags);
	if (adapter->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: %s\n",
		    adapter->intr_type == PCI_INTR_TYPE_MSI ?
		    "MSI" : "legacy intr");
		if (!msi_enable) {
			/* Retry with MSI.
			 */
			msi_enable = 1;
			adapter->flags &= ~EM_FLAG_SHARED_INTR;
			goto again;
		}
		return (ENXIO);
	}

	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	adapter->hw.back = &adapter->osdep;
	return (0);
}

static void
em_free_pci_res(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	if (adapter->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    adapter->intr_rid, adapter->intr_res);
	}

	if (adapter->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (adapter->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    adapter->memory_rid, adapter->memory);
	}

	if (adapter->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    adapter->flash_rid, adapter->flash);
	}

	if (adapter->ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT,
		    adapter->io_rid, adapter->ioport);
	}
}

static int
em_reset(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	uint16_t rx_buffer_size;
	uint32_t pba;

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Set up smart power down as default off on newer adapters. */
	if (!em_smart_pwr_down &&
	    (adapter->hw.mac.type == e1000_82571 ||
	     adapter->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 * Note: default does not leave enough room for Jumbo Frame >10k.
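	 * For example, on an 82547 with pba = E1000_PBA_30K the Tx FIFO
	 * computed below is 40K - 30K = 10K, which cannot hold a >8KB
	 * jumbo frame; that is why max_frame_size > 8192 selects the
	 * 22K/18K split instead.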
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.mac.max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;

	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;

	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;

	case e1000_82574:
	case e1000_82583:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;

	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;

	case e1000_ich9lan:
	case e1000_ich10lan:
#define E1000_PBA_10K	0x000A
		pba = E1000_PBA_10K;
		break;

	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
		pba = E1000_PBA_26K;
		break;

	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (adapter->hw.mac.max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
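	 * For example, with a 26KB packet buffer and a standard 1522-byte
	 * max frame, high_water = 26*1024 - roundup2(1522, 1024) = 24576
	 * bytes and low_water = 24576 - 1500 = 23076 bytes.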
2626 */ 2627 rx_buffer_size = 2628 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10; 2629 2630 adapter->hw.fc.high_water = rx_buffer_size - 2631 roundup2(adapter->hw.mac.max_frame_size, 1024); 2632 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500; 2633 2634 if (adapter->hw.mac.type == e1000_80003es2lan) 2635 adapter->hw.fc.pause_time = 0xFFFF; 2636 else 2637 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME; 2638 2639 adapter->hw.fc.send_xon = TRUE; 2640 2641 adapter->hw.fc.requested_mode = e1000_ifmedia2fc(adapter->ifm_flowctrl); 2642 2643 /* 2644 * Device specific overrides/settings 2645 */ 2646 switch (adapter->hw.mac.type) { 2647 case e1000_pchlan: 2648 KASSERT(adapter->hw.fc.requested_mode == e1000_fc_rx_pause || 2649 adapter->hw.fc.requested_mode == e1000_fc_none, 2650 ("unsupported flow control on PCH %d", 2651 adapter->hw.fc.requested_mode)); 2652 adapter->hw.fc.pause_time = 0xFFFF; /* override */ 2653 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU) { 2654 adapter->hw.fc.high_water = 0x3500; 2655 adapter->hw.fc.low_water = 0x1500; 2656 } else { 2657 adapter->hw.fc.high_water = 0x5000; 2658 adapter->hw.fc.low_water = 0x3000; 2659 } 2660 adapter->hw.fc.refresh_time = 0x1000; 2661 break; 2662 2663 case e1000_pch2lan: 2664 case e1000_pch_lpt: 2665 case e1000_pch_spt: 2666 adapter->hw.fc.high_water = 0x5C20; 2667 adapter->hw.fc.low_water = 0x5048; 2668 adapter->hw.fc.pause_time = 0x0650; 2669 adapter->hw.fc.refresh_time = 0x0400; 2670 /* Jumbos need adjusted PBA */ 2671 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU) 2672 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 12); 2673 else 2674 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 26); 2675 break; 2676 2677 case e1000_ich9lan: 2678 case e1000_ich10lan: 2679 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU) { 2680 adapter->hw.fc.high_water = 0x2800; 2681 adapter->hw.fc.low_water = 2682 adapter->hw.fc.high_water - 8; 2683 break; 2684 } 2685 /* FALL THROUGH */ 2686 default: 2687 if (adapter->hw.mac.type == e1000_80003es2lan) 2688 adapter->hw.fc.pause_time = 0xFFFF; 2689 break; 2690 } 2691 2692 /* I219 needs some special flushing to avoid hangs */ 2693 if (adapter->hw.mac.type == e1000_pch_spt) 2694 em_flush_txrx_ring(adapter); 2695 2696 /* Issue a global reset */ 2697 e1000_reset_hw(&adapter->hw); 2698 if (adapter->hw.mac.type >= e1000_82544) 2699 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); 2700 em_disable_aspm(adapter); 2701 2702 if (e1000_init_hw(&adapter->hw) < 0) { 2703 device_printf(dev, "Hardware Initialization Failed\n"); 2704 return (EIO); 2705 } 2706 2707 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); 2708 e1000_get_phy_info(&adapter->hw); 2709 e1000_check_for_link(&adapter->hw); 2710 2711 return (0); 2712 } 2713 2714 static void 2715 em_setup_ifp(struct adapter *adapter) 2716 { 2717 struct ifnet *ifp = &adapter->arpcom.ac_if; 2718 2719 if_initname(ifp, device_get_name(adapter->dev), 2720 device_get_unit(adapter->dev)); 2721 ifp->if_softc = adapter; 2722 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2723 ifp->if_init = em_init; 2724 ifp->if_ioctl = em_ioctl; 2725 ifp->if_start = em_start; 2726 #ifdef IFPOLL_ENABLE 2727 ifp->if_npoll = em_npoll; 2728 #endif 2729 ifp->if_watchdog = em_watchdog; 2730 ifp->if_nmbclusters = adapter->num_rx_desc; 2731 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1); 2732 ifq_set_ready(&ifp->if_snd); 2733 2734 ether_ifattach(ifp, adapter->hw.mac.addr, NULL); 2735 2736 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2737 if (adapter->hw.mac.type >= e1000_82543) 
2738 ifp->if_capabilities |= IFCAP_HWCSUM; 2739 if (adapter->flags & EM_FLAG_TSO) 2740 ifp->if_capabilities |= IFCAP_TSO; 2741 ifp->if_capenable = ifp->if_capabilities; 2742 2743 if (ifp->if_capenable & IFCAP_TXCSUM) 2744 ifp->if_hwassist |= EM_CSUM_FEATURES; 2745 if (ifp->if_capenable & IFCAP_TSO) 2746 ifp->if_hwassist |= CSUM_TSO; 2747 2748 /* 2749 * Tell the upper layer(s) we support long frames. 2750 */ 2751 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2752 2753 /* 2754 * Specify the media types supported by this adapter and register 2755 * callbacks to update media and link information 2756 */ 2757 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 2758 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { 2759 u_char fiber_type = IFM_1000_SX; /* default type */ 2760 2761 if (adapter->hw.mac.type == e1000_82545) 2762 fiber_type = IFM_1000_LX; 2763 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 2764 0, NULL); 2765 } else { 2766 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 2767 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2768 0, NULL); 2769 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 2770 0, NULL); 2771 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2772 0, NULL); 2773 if (adapter->hw.phy.type != e1000_phy_ife) { 2774 ifmedia_add(&adapter->media, 2775 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2776 } 2777 } 2778 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2779 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO | 2780 adapter->ifm_flowctrl); 2781 } 2782 2783 2784 /* 2785 * Workaround for SmartSpeed on 82541 and 82547 controllers 2786 */ 2787 static void 2788 em_smartspeed(struct adapter *adapter) 2789 { 2790 uint16_t phy_tmp; 2791 2792 if (adapter->link_active || adapter->hw.phy.type != e1000_phy_igp || 2793 adapter->hw.mac.autoneg == 0 || 2794 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2795 return; 2796 2797 if (adapter->smartspeed == 0) { 2798 /* 2799 * If Master/Slave config fault is asserted twice, 2800 * we assume back-to-back 2801 */ 2802 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2803 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2804 return; 2805 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2806 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2807 e1000_read_phy_reg(&adapter->hw, 2808 PHY_1000T_CTRL, &phy_tmp); 2809 if (phy_tmp & CR_1000T_MS_ENABLE) { 2810 phy_tmp &= ~CR_1000T_MS_ENABLE; 2811 e1000_write_phy_reg(&adapter->hw, 2812 PHY_1000T_CTRL, phy_tmp); 2813 adapter->smartspeed++; 2814 if (adapter->hw.mac.autoneg && 2815 !e1000_phy_setup_autoneg(&adapter->hw) && 2816 !e1000_read_phy_reg(&adapter->hw, 2817 PHY_CONTROL, &phy_tmp)) { 2818 phy_tmp |= MII_CR_AUTO_NEG_EN | 2819 MII_CR_RESTART_AUTO_NEG; 2820 e1000_write_phy_reg(&adapter->hw, 2821 PHY_CONTROL, phy_tmp); 2822 } 2823 } 2824 } 2825 return; 2826 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { 2827 /* If still no link, perhaps using 2/3 pair cable */ 2828 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); 2829 phy_tmp |= CR_1000T_MS_ENABLE; 2830 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); 2831 if (adapter->hw.mac.autoneg && 2832 !e1000_phy_setup_autoneg(&adapter->hw) && 2833 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { 2834 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2835 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); 2836 } 2837 } 2838 2839 /* Restart process after 
EM_SMARTSPEED_MAX iterations */ 2840 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX) 2841 adapter->smartspeed = 0; 2842 } 2843 2844 static int 2845 em_dma_malloc(struct adapter *adapter, bus_size_t size, 2846 struct em_dma_alloc *dma) 2847 { 2848 dma->dma_vaddr = bus_dmamem_coherent_any(adapter->parent_dtag, 2849 EM_DBA_ALIGN, size, BUS_DMA_WAITOK, 2850 &dma->dma_tag, &dma->dma_map, 2851 &dma->dma_paddr); 2852 if (dma->dma_vaddr == NULL) 2853 return ENOMEM; 2854 else 2855 return 0; 2856 } 2857 2858 static void 2859 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2860 { 2861 if (dma->dma_tag == NULL) 2862 return; 2863 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2864 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2865 bus_dma_tag_destroy(dma->dma_tag); 2866 } 2867 2868 static int 2869 em_create_tx_ring(struct adapter *adapter) 2870 { 2871 device_t dev = adapter->dev; 2872 struct em_buffer *tx_buffer; 2873 int error, i; 2874 2875 adapter->tx_buffer_area = 2876 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc, 2877 M_DEVBUF, M_WAITOK | M_ZERO); 2878 2879 /* 2880 * Create DMA tags for tx buffers 2881 */ 2882 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */ 2883 1, 0, /* alignment, bounds */ 2884 BUS_SPACE_MAXADDR, /* lowaddr */ 2885 BUS_SPACE_MAXADDR, /* highaddr */ 2886 NULL, NULL, /* filter, filterarg */ 2887 EM_TSO_SIZE, /* maxsize */ 2888 EM_MAX_SCATTER, /* nsegments */ 2889 PAGE_SIZE, /* maxsegsize */ 2890 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2891 BUS_DMA_ONEBPAGE, /* flags */ 2892 &adapter->txtag); 2893 if (error) { 2894 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2895 kfree(adapter->tx_buffer_area, M_DEVBUF); 2896 adapter->tx_buffer_area = NULL; 2897 return error; 2898 } 2899 2900 /* 2901 * Create DMA maps for tx buffers 2902 */ 2903 for (i = 0; i < adapter->num_tx_desc; i++) { 2904 tx_buffer = &adapter->tx_buffer_area[i]; 2905 2906 error = bus_dmamap_create(adapter->txtag, 2907 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2908 &tx_buffer->map); 2909 if (error) { 2910 device_printf(dev, "Unable to create TX DMA map\n"); 2911 em_destroy_tx_ring(adapter, i); 2912 return error; 2913 } 2914 } 2915 return (0); 2916 } 2917 2918 static void 2919 em_init_tx_ring(struct adapter *adapter) 2920 { 2921 /* Clear the old ring contents */ 2922 bzero(adapter->tx_desc_base, 2923 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); 2924 2925 /* Reset state */ 2926 adapter->next_avail_tx_desc = 0; 2927 adapter->next_tx_to_clean = 0; 2928 adapter->num_tx_desc_avail = adapter->num_tx_desc; 2929 adapter->tx_nmbuf = 0; 2930 adapter->tx_running = 0; 2931 } 2932 2933 static void 2934 em_init_tx_unit(struct adapter *adapter) 2935 { 2936 uint32_t tctl, tarc, tipg = 0; 2937 uint64_t bus_addr; 2938 2939 /* Setup the Base and Length of the Tx Descriptor Ring */ 2940 bus_addr = adapter->txdma.dma_paddr; 2941 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0), 2942 adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); 2943 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), 2944 (uint32_t)(bus_addr >> 32)); 2945 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0), 2946 (uint32_t)bus_addr); 2947 /* Setup the HW Tx Head and Tail descriptor pointers */ 2948 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0); 2949 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0); 2950 if (adapter->flags & EM_FLAG_GEN2) { 2951 uint32_t txdctl = 0; 2952 2953 txdctl |= 0x1f; /* PTHRESH */ 2954 txdctl |= 1 << 8; /* HTHRESH */ 2955 txdctl |= 1 << 16; /* WTHRESH */ 2956 txdctl |= 1 << 22; /* Reserved bit 22 must 
always be 1 */ 2957 txdctl |= E1000_TXDCTL_GRAN; 2958 txdctl |= 1 << 25; /* LWTHRESH */ 2959 2960 E1000_WRITE_REG(&adapter->hw, E1000_TXDCTL(0), txdctl); 2961 } 2962 2963 /* Set the default values for the Tx Inter Packet Gap timer */ 2964 switch (adapter->hw.mac.type) { 2965 case e1000_82542: 2966 tipg = DEFAULT_82542_TIPG_IPGT; 2967 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2968 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2969 break; 2970 2971 case e1000_80003es2lan: 2972 tipg = DEFAULT_82543_TIPG_IPGR1; 2973 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2974 E1000_TIPG_IPGR2_SHIFT; 2975 break; 2976 2977 default: 2978 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 2979 adapter->hw.phy.media_type == 2980 e1000_media_type_internal_serdes) 2981 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2982 else 2983 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2984 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2985 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2986 break; 2987 } 2988 2989 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg); 2990 2991 /* NOTE: 0 is not allowed for TIDV */ 2992 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, 1); 2993 if(adapter->hw.mac.type >= e1000_82540) 2994 E1000_WRITE_REG(&adapter->hw, E1000_TADV, 0); 2995 2996 if (adapter->hw.mac.type == e1000_82571 || 2997 adapter->hw.mac.type == e1000_82572) { 2998 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); 2999 tarc |= TARC_SPEED_MODE_BIT; 3000 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); 3001 } else if (adapter->hw.mac.type == e1000_80003es2lan) { 3002 /* errata: program both queues to unweighted RR */ 3003 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); 3004 tarc |= 1; 3005 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); 3006 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); 3007 tarc |= 1; 3008 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); 3009 } else if (adapter->hw.mac.type == e1000_82574) { 3010 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); 3011 tarc |= TARC_ERRATA_BIT; 3012 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); 3013 } 3014 3015 /* Program the Transmit Control Register */ 3016 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); 3017 tctl &= ~E1000_TCTL_CT; 3018 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 3019 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 3020 3021 if (adapter->hw.mac.type >= e1000_82571) 3022 tctl |= E1000_TCTL_MULR; 3023 3024 /* This write will effectively turn on the transmit unit. 
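	 * TCTL.EN is included in the tctl value computed above, together
	 * with PSP, RTLC and the collision threshold.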
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	if (adapter->hw.mac.type == e1000_82571 ||
	    adapter->hw.mac.type == e1000_82572 ||
	    adapter->hw.mac.type == e1000_80003es2lan) {
		/* Bit 28 of TARC1 must be cleared when MULR is enabled */
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc &= ~(1 << 28);
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	} else if (adapter->hw.mac.type == e1000_pch_spt) {
		uint32_t reg;

		reg = E1000_READ_REG(&adapter->hw, E1000_IOSFPC);
		reg |= E1000_RCTL_RDMTS_HEX;
		E1000_WRITE_REG(&adapter->hw, E1000_IOSFPC, reg);
		reg = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		reg |= E1000_TARC0_CB_MULTIQ_3_REQ;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), reg);
	}
}

static void
em_destroy_tx_ring(struct adapter *adapter, int ndesc)
{
	struct em_buffer *tx_buffer;
	int i;

	if (adapter->tx_buffer_area == NULL)
		return;

	for (i = 0; i < ndesc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];

		KKASSERT(tx_buffer->m_head == NULL);
		bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
	}
	bus_dma_tag_destroy(adapter->txtag);

	kfree(adapter->tx_buffer_area, M_DEVBUF);
	adapter->tx_buffer_area = NULL;
}

/*
 * The offload context needs to be set when we transfer the first
 * packet of a particular protocol (TCP/UDP). This routine has been
 * enhanced to deal with inserted VLAN headers.
 *
 * If the new packet's ether header length, ip header length and
 * csum offloading type are same as the previous packet, we should
 * avoid allocating a new csum context descriptor; mainly to take
 * advantage of the pipeline effect of the TX data read request.
 *
 * This function returns the number of TX descriptors allocated for
 * the csum context.
 */
static int
em_txcsum(struct adapter *adapter, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int curr_txd, ehdrlen, csum_flags;
	uint32_t cmd, hdr_len, ip_hlen;

	csum_flags = mp->m_pkthdr.csum_flags & EM_CSUM_FEATURES;
	ip_hlen = mp->m_pkthdr.csum_iphlen;
	ehdrlen = mp->m_pkthdr.csum_lhlen;

	if (adapter->csum_lhlen == ehdrlen &&
	    adapter->csum_iphlen == ip_hlen &&
	    adapter->csum_flags == csum_flags) {
		/*
		 * Same csum offload context as the previous packets;
		 * just return.
		 */
		*txd_upper = adapter->csum_txd_upper;
		*txd_lower = adapter->csum_txd_lower;
		return 0;
	}

	/*
	 * Setup a new csum offload context.
	 */

	curr_txd = adapter->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd];

	cmd = 0;

	/* Setup of IP header checksum. */
	if (csum_flags & CSUM_IP) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
		cmd |= E1000_TXD_CMD_IP;
		*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}
	hdr_len = ehdrlen + ip_hlen;

	if (csum_flags & CSUM_TCP) {
		/*
		 * Start offset for payload checksum calculation.
3133 * End offset for payload checksum calculation. 3134 * Offset of place to put the checksum. 3135 */ 3136 TXD->upper_setup.tcp_fields.tucss = hdr_len; 3137 TXD->upper_setup.tcp_fields.tucse = htole16(0); 3138 TXD->upper_setup.tcp_fields.tucso = 3139 hdr_len + offsetof(struct tcphdr, th_sum); 3140 cmd |= E1000_TXD_CMD_TCP; 3141 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 3142 } else if (csum_flags & CSUM_UDP) { 3143 /* 3144 * Start offset for header checksum calculation. 3145 * End offset for header checksum calculation. 3146 * Offset of place to put the checksum. 3147 */ 3148 TXD->upper_setup.tcp_fields.tucss = hdr_len; 3149 TXD->upper_setup.tcp_fields.tucse = htole16(0); 3150 TXD->upper_setup.tcp_fields.tucso = 3151 hdr_len + offsetof(struct udphdr, uh_sum); 3152 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 3153 } 3154 3155 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 3156 E1000_TXD_DTYP_D; /* Data descr */ 3157 3158 /* Save the information for this csum offloading context */ 3159 adapter->csum_lhlen = ehdrlen; 3160 adapter->csum_iphlen = ip_hlen; 3161 adapter->csum_flags = csum_flags; 3162 adapter->csum_txd_upper = *txd_upper; 3163 adapter->csum_txd_lower = *txd_lower; 3164 3165 TXD->tcp_seg_setup.data = htole32(0); 3166 TXD->cmd_and_length = 3167 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 3168 3169 if (++curr_txd == adapter->num_tx_desc) 3170 curr_txd = 0; 3171 3172 KKASSERT(adapter->num_tx_desc_avail > 0); 3173 adapter->num_tx_desc_avail--; 3174 3175 adapter->next_avail_tx_desc = curr_txd; 3176 return 1; 3177 } 3178 3179 static void 3180 em_txeof(struct adapter *adapter) 3181 { 3182 struct ifnet *ifp = &adapter->arpcom.ac_if; 3183 struct em_buffer *tx_buffer; 3184 int first, num_avail; 3185 3186 if (adapter->tx_dd_head == adapter->tx_dd_tail) 3187 return; 3188 3189 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 3190 return; 3191 3192 num_avail = adapter->num_tx_desc_avail; 3193 first = adapter->next_tx_to_clean; 3194 3195 while (adapter->tx_dd_head != adapter->tx_dd_tail) { 3196 struct e1000_tx_desc *tx_desc; 3197 int dd_idx = adapter->tx_dd[adapter->tx_dd_head]; 3198 3199 tx_desc = &adapter->tx_desc_base[dd_idx]; 3200 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 3201 EM_INC_TXDD_IDX(adapter->tx_dd_head); 3202 3203 if (++dd_idx == adapter->num_tx_desc) 3204 dd_idx = 0; 3205 3206 while (first != dd_idx) { 3207 logif(pkt_txclean); 3208 3209 KKASSERT(num_avail < adapter->num_tx_desc); 3210 num_avail++; 3211 3212 tx_buffer = &adapter->tx_buffer_area[first]; 3213 if (tx_buffer->m_head != NULL) 3214 em_free_txbuffer(adapter, tx_buffer); 3215 3216 if (++first == adapter->num_tx_desc) 3217 first = 0; 3218 } 3219 } else { 3220 break; 3221 } 3222 } 3223 adapter->next_tx_to_clean = first; 3224 adapter->num_tx_desc_avail = num_avail; 3225 3226 if (adapter->tx_dd_head == adapter->tx_dd_tail) { 3227 adapter->tx_dd_head = 0; 3228 adapter->tx_dd_tail = 0; 3229 } 3230 3231 if (!EM_IS_OACTIVE(adapter)) { 3232 ifq_clr_oactive(&ifp->if_snd); 3233 3234 /* All clean, turn off the timer */ 3235 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 3236 ifp->if_timer = 0; 3237 } 3238 adapter->tx_running = EM_TX_RUNNING; 3239 } 3240 3241 static void 3242 em_tx_collect(struct adapter *adapter, boolean_t gc) 3243 { 3244 struct ifnet *ifp = &adapter->arpcom.ac_if; 3245 struct em_buffer *tx_buffer; 3246 int tdh, first, num_avail, dd_idx = -1; 3247 3248 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 3249 return; 3250 3251 tdh = E1000_READ_REG(&adapter->hw, 
	    E1000_TDH(0));
	if (tdh == adapter->next_tx_to_clean) {
		if (gc && adapter->tx_nmbuf > 0)
			adapter->tx_running = EM_TX_RUNNING;
		return;
	}
	if (gc)
		adapter->tx_gc++;

	if (adapter->tx_dd_head != adapter->tx_dd_tail)
		dd_idx = adapter->tx_dd[adapter->tx_dd_head];

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;

	while (first != tdh) {
		logif(pkt_txclean);

		KKASSERT(num_avail < adapter->num_tx_desc);
		num_avail++;

		tx_buffer = &adapter->tx_buffer_area[first];
		if (tx_buffer->m_head != NULL)
			em_free_txbuffer(adapter, tx_buffer);

		if (first == dd_idx) {
			EM_INC_TXDD_IDX(adapter->tx_dd_head);
			if (adapter->tx_dd_head == adapter->tx_dd_tail) {
				adapter->tx_dd_head = 0;
				adapter->tx_dd_tail = 0;
				dd_idx = -1;
			} else {
				dd_idx = adapter->tx_dd[adapter->tx_dd_head];
			}
		}

		if (++first == adapter->num_tx_desc)
			first = 0;
	}
	adapter->next_tx_to_clean = first;
	adapter->num_tx_desc_avail = num_avail;

	if (!EM_IS_OACTIVE(adapter)) {
		ifq_clr_oactive(&ifp->if_snd);

		/* All clean, turn off the timer */
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
	}
	if (!gc || adapter->tx_nmbuf > 0)
		adapter->tx_running = EM_TX_RUNNING;
}

/*
 * When link is lost there is sometimes work still in the TX ring,
 * which would result in a watchdog; rather than allow that, do an
 * attempted cleanup and then reinit here. Note that this has been
 * seen mostly with fiber adapters.
 */
static void
em_tx_purge(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	if (!adapter->link_active && ifp->if_timer) {
		em_tx_collect(adapter, FALSE);
		if (ifp->if_timer) {
			if_printf(ifp, "Link lost, TX pending, reinit\n");
			ifp->if_timer = 0;
			em_init(adapter);
		}
	}
}

static int
em_newbuf(struct adapter *adapter, int i, int init)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct em_buffer *rx_buffer;
	int error, nseg;

	m = m_getcl(init ?
M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 3335 if (m == NULL) { 3336 adapter->mbuf_cluster_failed++; 3337 if (init) { 3338 if_printf(&adapter->arpcom.ac_if, 3339 "Unable to allocate RX mbuf\n"); 3340 } 3341 return (ENOBUFS); 3342 } 3343 m->m_len = m->m_pkthdr.len = MCLBYTES; 3344 3345 if (adapter->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 3346 m_adj(m, ETHER_ALIGN); 3347 3348 error = bus_dmamap_load_mbuf_segment(adapter->rxtag, 3349 adapter->rx_sparemap, m, 3350 &seg, 1, &nseg, BUS_DMA_NOWAIT); 3351 if (error) { 3352 m_freem(m); 3353 if (init) { 3354 if_printf(&adapter->arpcom.ac_if, 3355 "Unable to load RX mbuf\n"); 3356 } 3357 return (error); 3358 } 3359 3360 rx_buffer = &adapter->rx_buffer_area[i]; 3361 if (rx_buffer->m_head != NULL) 3362 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 3363 3364 map = rx_buffer->map; 3365 rx_buffer->map = adapter->rx_sparemap; 3366 adapter->rx_sparemap = map; 3367 3368 rx_buffer->m_head = m; 3369 3370 adapter->rx_desc_base[i].buffer_addr = htole64(seg.ds_addr); 3371 return (0); 3372 } 3373 3374 static int 3375 em_create_rx_ring(struct adapter *adapter) 3376 { 3377 device_t dev = adapter->dev; 3378 struct em_buffer *rx_buffer; 3379 int i, error; 3380 3381 adapter->rx_buffer_area = 3382 kmalloc(sizeof(struct em_buffer) * adapter->num_rx_desc, 3383 M_DEVBUF, M_WAITOK | M_ZERO); 3384 3385 /* 3386 * Create DMA tag for rx buffers 3387 */ 3388 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */ 3389 1, 0, /* alignment, bounds */ 3390 BUS_SPACE_MAXADDR, /* lowaddr */ 3391 BUS_SPACE_MAXADDR, /* highaddr */ 3392 NULL, NULL, /* filter, filterarg */ 3393 MCLBYTES, /* maxsize */ 3394 1, /* nsegments */ 3395 MCLBYTES, /* maxsegsize */ 3396 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 3397 &adapter->rxtag); 3398 if (error) { 3399 device_printf(dev, "Unable to allocate RX DMA tag\n"); 3400 kfree(adapter->rx_buffer_area, M_DEVBUF); 3401 adapter->rx_buffer_area = NULL; 3402 return error; 3403 } 3404 3405 /* 3406 * Create spare DMA map for rx buffers 3407 */ 3408 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK, 3409 &adapter->rx_sparemap); 3410 if (error) { 3411 device_printf(dev, "Unable to create spare RX DMA map\n"); 3412 bus_dma_tag_destroy(adapter->rxtag); 3413 kfree(adapter->rx_buffer_area, M_DEVBUF); 3414 adapter->rx_buffer_area = NULL; 3415 return error; 3416 } 3417 3418 /* 3419 * Create DMA maps for rx buffers 3420 */ 3421 for (i = 0; i < adapter->num_rx_desc; i++) { 3422 rx_buffer = &adapter->rx_buffer_area[i]; 3423 3424 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK, 3425 &rx_buffer->map); 3426 if (error) { 3427 device_printf(dev, "Unable to create RX DMA map\n"); 3428 em_destroy_rx_ring(adapter, i); 3429 return error; 3430 } 3431 } 3432 return (0); 3433 } 3434 3435 static int 3436 em_init_rx_ring(struct adapter *adapter) 3437 { 3438 int i, error; 3439 3440 /* Reset descriptor ring */ 3441 bzero(adapter->rx_desc_base, 3442 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc); 3443 3444 /* Allocate new ones. 
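	 * (init == 1, so em_newbuf() may block for mbufs and any
	 * allocation failure aborts the ring initialization.)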
	 */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_newbuf(adapter, i, 1);
		if (error)
			return (error);
	}

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;

	return (0);
}

static void
em_init_rx_unit(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint64_t bus_addr;
	uint32_t rctl, rxcsum;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	/* Do not disable if ever enabled on this hardware */
	if (adapter->hw.mac.type != e1000_82574 &&
	    adapter->hw.mac.type != e1000_82583)
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		uint32_t itr;

		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
		 */
		if (adapter->int_throttle_ceil)
			itr = 1000000000 / 256 / adapter->int_throttle_ceil;
		else
			itr = 0;
		em_set_itr(adapter, itr);
	}

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574) {
		uint32_t rfctl;

		rfctl = E1000_READ_REG(&adapter->hw, E1000_RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		E1000_WRITE_REG(&adapter->hw, E1000_RFCTL, rfctl);
	}

	/* Receive Checksum Offload for IP and TCP/UDP */
	rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	else
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);

	/*
	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
	 * long latencies are observed, like Lenovo X60. This
	 * change eliminates the problem, but since having positive
	 * values in RDTR is a known source of problems on other
	 * platforms another solution is being sought.
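	 * (With em_82573_workaround set, RADV and RDTR are simply loaded
	 * with the EM_RADV_82573/EM_RDTR_82573 values below.)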
3511 */ 3512 if (em_82573_workaround && adapter->hw.mac.type == e1000_82573) { 3513 E1000_WRITE_REG(&adapter->hw, E1000_RADV, EM_RADV_82573); 3514 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, EM_RDTR_82573); 3515 } 3516 3517 /* 3518 * Setup the Base and Length of the Rx Descriptor Ring 3519 */ 3520 bus_addr = adapter->rxdma.dma_paddr; 3521 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0), 3522 adapter->num_rx_desc * sizeof(struct e1000_rx_desc)); 3523 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0), 3524 (uint32_t)(bus_addr >> 32)); 3525 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0), 3526 (uint32_t)bus_addr); 3527 3528 /* 3529 * Setup the HW Rx Head and Tail Descriptor Pointers 3530 */ 3531 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0); 3532 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1); 3533 3534 /* Set PTHRESH for improved jumbo performance */ 3535 if (ifp->if_mtu > ETHERMTU) { 3536 uint32_t rxdctl; 3537 3538 if (adapter->hw.mac.type == e1000_ich9lan || 3539 adapter->hw.mac.type == e1000_pch2lan || 3540 adapter->hw.mac.type == e1000_ich10lan) { 3541 rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(0)); 3542 E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(0), 3543 rxdctl | 3); 3544 } else if (adapter->hw.mac.type == e1000_82574) { 3545 rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(0)); 3546 rxdctl |= 0x20; /* PTHRESH */ 3547 rxdctl |= 4 << 8; /* HTHRESH */ 3548 rxdctl |= 4 << 16; /* WTHRESH */ 3549 rxdctl |= 1 << 24; /* Switch to granularity */ 3550 E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(0), rxdctl); 3551 } 3552 } 3553 3554 if (adapter->hw.mac.type >= e1000_pch2lan) { 3555 if (ifp->if_mtu > ETHERMTU) 3556 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, TRUE); 3557 else 3558 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, FALSE); 3559 } 3560 3561 /* Setup the Receive Control Register */ 3562 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3563 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3564 E1000_RCTL_RDMTS_HALF | 3565 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3566 3567 /* Make sure VLAN Filters are off */ 3568 rctl &= ~E1000_RCTL_VFE; 3569 3570 if (e1000_tbi_sbp_enabled_82543(&adapter->hw)) 3571 rctl |= E1000_RCTL_SBP; 3572 else 3573 rctl &= ~E1000_RCTL_SBP; 3574 3575 switch (adapter->rx_buffer_len) { 3576 default: 3577 case 2048: 3578 rctl |= E1000_RCTL_SZ_2048; 3579 break; 3580 3581 case 4096: 3582 rctl |= E1000_RCTL_SZ_4096 | 3583 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3584 break; 3585 3586 case 8192: 3587 rctl |= E1000_RCTL_SZ_8192 | 3588 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3589 break; 3590 3591 case 16384: 3592 rctl |= E1000_RCTL_SZ_16384 | 3593 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3594 break; 3595 } 3596 3597 if (ifp->if_mtu > ETHERMTU) 3598 rctl |= E1000_RCTL_LPE; 3599 else 3600 rctl &= ~E1000_RCTL_LPE; 3601 3602 /* Enable Receives */ 3603 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); 3604 } 3605 3606 static void 3607 em_destroy_rx_ring(struct adapter *adapter, int ndesc) 3608 { 3609 struct em_buffer *rx_buffer; 3610 int i; 3611 3612 if (adapter->rx_buffer_area == NULL) 3613 return; 3614 3615 for (i = 0; i < ndesc; i++) { 3616 rx_buffer = &adapter->rx_buffer_area[i]; 3617 3618 KKASSERT(rx_buffer->m_head == NULL); 3619 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map); 3620 } 3621 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap); 3622 bus_dma_tag_destroy(adapter->rxtag); 3623 3624 kfree(adapter->rx_buffer_area, M_DEVBUF); 3625 adapter->rx_buffer_area = NULL; 3626 } 3627 3628 static void 3629 em_rxeof(struct adapter *adapter, int count) 
3630 { 3631 struct ifnet *ifp = &adapter->arpcom.ac_if; 3632 uint8_t status, accept_frame = 0, eop = 0; 3633 uint16_t len, desc_len, prev_len_adj; 3634 struct e1000_rx_desc *current_desc; 3635 struct mbuf *mp; 3636 int i; 3637 3638 i = adapter->next_rx_desc_to_check; 3639 current_desc = &adapter->rx_desc_base[i]; 3640 3641 if (!(current_desc->status & E1000_RXD_STAT_DD)) 3642 return; 3643 3644 while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) { 3645 struct mbuf *m = NULL; 3646 3647 logif(pkt_receive); 3648 3649 mp = adapter->rx_buffer_area[i].m_head; 3650 3651 /* 3652 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3653 * needs to access the last received byte in the mbuf. 3654 */ 3655 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, 3656 BUS_DMASYNC_POSTREAD); 3657 3658 accept_frame = 1; 3659 prev_len_adj = 0; 3660 desc_len = le16toh(current_desc->length); 3661 status = current_desc->status; 3662 if (status & E1000_RXD_STAT_EOP) { 3663 count--; 3664 eop = 1; 3665 if (desc_len < ETHER_CRC_LEN) { 3666 len = 0; 3667 prev_len_adj = ETHER_CRC_LEN - desc_len; 3668 } else { 3669 len = desc_len - ETHER_CRC_LEN; 3670 } 3671 } else { 3672 eop = 0; 3673 len = desc_len; 3674 } 3675 3676 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { 3677 uint8_t last_byte; 3678 uint32_t pkt_len = desc_len; 3679 3680 if (adapter->fmp != NULL) 3681 pkt_len += adapter->fmp->m_pkthdr.len; 3682 3683 last_byte = *(mtod(mp, caddr_t) + desc_len - 1); 3684 if (TBI_ACCEPT(&adapter->hw, status, 3685 current_desc->errors, pkt_len, last_byte, 3686 adapter->min_frame_size, 3687 adapter->hw.mac.max_frame_size)) { 3688 e1000_tbi_adjust_stats_82543(&adapter->hw, 3689 &adapter->stats, pkt_len, 3690 adapter->hw.mac.addr, 3691 adapter->hw.mac.max_frame_size); 3692 if (len > 0) 3693 len--; 3694 } else { 3695 accept_frame = 0; 3696 } 3697 } 3698 3699 if (accept_frame) { 3700 if (em_newbuf(adapter, i, 0) != 0) { 3701 IFNET_STAT_INC(ifp, iqdrops, 1); 3702 goto discard; 3703 } 3704 3705 /* Assign correct length to the current fragment */ 3706 mp->m_len = len; 3707 3708 if (adapter->fmp == NULL) { 3709 mp->m_pkthdr.len = len; 3710 adapter->fmp = mp; /* Store the first mbuf */ 3711 adapter->lmp = mp; 3712 } else { 3713 /* 3714 * Chain mbuf's together 3715 */ 3716 3717 /* 3718 * Adjust length of previous mbuf in chain if 3719 * we received less than 4 bytes in the last 3720 * descriptor. 
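				 * e.g. if the final descriptor carried only
				 * 2 bytes, those are CRC bytes and
				 * prev_len_adj (ETHER_CRC_LEN - desc_len = 2)
				 * is trimmed from the previous mbuf.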
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				IFNET_STAT_INC(ifp, ipackets, 1);

				if (ifp->if_capenable & IFCAP_RXCSUM) {
					em_rxcsum(adapter, current_desc,
					    adapter->fmp);
				}

				if (status & E1000_RXD_STAT_VP) {
					adapter->fmp->m_pkthdr.ether_vlantag =
					    (le16toh(current_desc->special) &
					     E1000_RXD_SPC_VLAN_MASK);
					adapter->fmp->m_flags |= M_VLANTAG;
				}
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			IFNET_STAT_INC(ifp, ierrors, 1);
discard:
#ifdef foo
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->hw.mac.max_frame_size <=
			    (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
#endif
			if (adapter->fmp != NULL) {
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			m = NULL;
		}

		/* Zero out the receive descriptor's status. */
		current_desc->status = 0;

		if (m != NULL)
			ifp->if_input(ifp, m, NULL, -1);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
}

static void
em_rxcsum(struct adapter *adapter, struct e1000_rx_desc *rx_desc,
    struct mbuf *mp)
{
	/* 82543 or newer only */
	if (adapter->hw.mac.type < e1000_82543 ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM))
		return;

	if ((rx_desc->status & E1000_RXD_STAT_IPCS) &&
	    !(rx_desc->errors & E1000_RXD_ERR_IPE)) {
		/* IP Checksum Good */
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
	}

	if ((rx_desc->status & E1000_RXD_STAT_TCPCS) &&
	    !(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
		    CSUM_PSEUDO_HDR |
		    CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static void
em_enable_intr(struct adapter *adapter)
{
	uint32_t ims_mask = IMS_ENABLE_MASK;

	lwkt_serialize_handler_enable(adapter->arpcom.ac_if.if_serializer);

#if 0
	/* XXX MSIX */
	if (adapter->hw.mac.type == e1000_82574) {
		E1000_WRITE_REG(&adapter->hw, EM_EIAC, EM_MSIX_MASK);
		ims_mask |= EM_MSIX_MASK;
	}
#endif
	E1000_WRITE_REG(&adapter->hw, E1000_IMS, ims_mask);
}

static void
em_disable_intr(struct adapter *adapter)
{
	uint32_t clear = 0xffffffff;

	/*
	 * The first version of the 82542 had an errata: when link was
	 * forced it would stay up even if the cable was disconnected.
	 * Sequence errors were used to detect the disconnect and then the
	 * driver would unforce the link. This code is in the ISR.
For 3843 * this to work correctly the Sequence error interrupt had to be 3844 * enabled all the time. 3845 */ 3846 if (adapter->hw.mac.type == e1000_82542 && 3847 adapter->hw.revision_id == E1000_REVISION_2) 3848 clear &= ~E1000_ICR_RXSEQ; 3849 else if (adapter->hw.mac.type == e1000_82574) 3850 E1000_WRITE_REG(&adapter->hw, EM_EIAC, 0); 3851 3852 E1000_WRITE_REG(&adapter->hw, E1000_IMC, clear); 3853 3854 adapter->npoll.ifpc_stcount = 0; 3855 3856 lwkt_serialize_handler_disable(adapter->arpcom.ac_if.if_serializer); 3857 } 3858 3859 /* 3860 * Bit of a misnomer, what this really means is 3861 * to enable OS management of the system... aka 3862 * to disable special hardware management features 3863 */ 3864 static void 3865 em_get_mgmt(struct adapter *adapter) 3866 { 3867 /* A shared code workaround */ 3868 #define E1000_82542_MANC2H E1000_MANC2H 3869 if (adapter->flags & EM_FLAG_HAS_MGMT) { 3870 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); 3871 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3872 3873 /* disable hardware interception of ARP */ 3874 manc &= ~(E1000_MANC_ARP_EN); 3875 3876 /* enable receiving management packets to the host */ 3877 if (adapter->hw.mac.type >= e1000_82571) { 3878 manc |= E1000_MANC_EN_MNG2HOST; 3879 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3880 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3881 manc2h |= E1000_MNG2HOST_PORT_623; 3882 manc2h |= E1000_MNG2HOST_PORT_664; 3883 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); 3884 } 3885 3886 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3887 } 3888 } 3889 3890 /* 3891 * Give control back to hardware management 3892 * controller if there is one. 3893 */ 3894 static void 3895 em_rel_mgmt(struct adapter *adapter) 3896 { 3897 if (adapter->flags & EM_FLAG_HAS_MGMT) { 3898 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3899 3900 /* re-enable hardware interception of ARP */ 3901 manc |= E1000_MANC_ARP_EN; 3902 3903 if (adapter->hw.mac.type >= e1000_82571) 3904 manc &= ~E1000_MANC_EN_MNG2HOST; 3905 3906 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3907 } 3908 } 3909 3910 /* 3911 * em_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3912 * For ASF and Pass Through versions of f/w this means that 3913 * the driver is loaded. For AMT version (only with 82573) 3914 * of the f/w this means that the network i/f is open. 3915 */ 3916 static void 3917 em_get_hw_control(struct adapter *adapter) 3918 { 3919 /* Let firmware know the driver has taken over */ 3920 if (adapter->hw.mac.type == e1000_82573) { 3921 uint32_t swsm; 3922 3923 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); 3924 E1000_WRITE_REG(&adapter->hw, E1000_SWSM, 3925 swsm | E1000_SWSM_DRV_LOAD); 3926 } else { 3927 uint32_t ctrl_ext; 3928 3929 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); 3930 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, 3931 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3932 } 3933 adapter->flags |= EM_FLAG_HW_CTRL; 3934 } 3935 3936 /* 3937 * em_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3938 * For ASF and Pass Through versions of f/w this means that the 3939 * driver is no longer loaded. For AMT version (only with 82573) 3940 * of the f/w this means that the network i/f is closed. 
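 *
 * Sketch of the intended pairing (as suggested by the comments
 * above, not a statement of every call site): em_get_hw_control()
 * is invoked once the driver takes ownership of the port, and
 * em_rel_hw_control() when it stops or detaches, so ASF/AMT
 * firmware can tell whether the OS driver is active.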
3941 */
3942 static void
3943 em_rel_hw_control(struct adapter *adapter)
3944 {
3945 if ((adapter->flags & EM_FLAG_HW_CTRL) == 0)
3946 return;
3947 adapter->flags &= ~EM_FLAG_HW_CTRL;
3948
3949 /* Let firmware take over control of h/w */
3950 if (adapter->hw.mac.type == e1000_82573) {
3951 uint32_t swsm;
3952
3953 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
3954 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
3955 swsm & ~E1000_SWSM_DRV_LOAD);
3956 } else {
3957 uint32_t ctrl_ext;
3958
3959 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3960 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3961 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3962 }
3963 }
3964
3965 static int
3966 em_is_valid_eaddr(const uint8_t *addr)
3967 {
3968 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3969
3970 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3971 return (FALSE);
3972
3973 return (TRUE);
3974 }
3975
3976 /*
3977 * Enable PCI Wake On LAN capability
3978 */
3979 static void
3980 em_enable_wol(device_t dev)
3981 {
3982 uint16_t cap, status;
3983 uint8_t id;
3984
3985 /* First find the capabilities pointer */
3986 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3987
3988 /* Read the PM Capabilities */
3989 id = pci_read_config(dev, cap, 1);
3990 if (id != PCIY_PMG) /* Something wrong */
3991 return;
3992
3993 /*
3994 * OK, we have the power capabilities,
3995 * so now get the status register
3996 */
3997 cap += PCIR_POWER_STATUS;
3998 status = pci_read_config(dev, cap, 2);
3999 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4000 pci_write_config(dev, cap, status, 2);
4001 }
4002
4003
4004 /*
4005 * 82544 Coexistence issue workaround.
4006 * There are 2 issues.
4007 * 1. Transmit Hang issue.
4008 * To detect this issue, the following equation can be used...
4009 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4010 * If SUM[3:0] is between 1 and 4, we will have this issue.
4011 *
4012 * 2. DAC issue.
4013 * To detect this issue, the following equation can be used...
4014 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4015 * If SUM[3:0] is between 9 and c, we will have this issue.
4016 *
4017 * WORKAROUND:
4018 * Make sure we do not have an ending address of
4019 * 1,2,3,4 (Hang) or 9,a,b,c (DAC).
4020 */
4021 static uint32_t
4022 em_82544_fill_desc(bus_addr_t address, uint32_t length, PDESC_ARRAY desc_array)
4023 {
4024 uint32_t safe_terminator;
4025
4026 /*
4027 * Since the issue is sensitive to both length and address,
4028 * let us first check the address...
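 *
 * Worked example (illustrative numbers): ADDR[2:0] = 0x6 and
 * SIZE[3:0] = 0xE give SUM[3:0] = (0x6 + 0xE) & 0xF = 0x4, which
 * falls in the 1-4 hang range, so the code below splits the buffer
 * into a (length - 4)-byte descriptor plus a separate 4-byte tail
 * descriptor.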
4029 */
4030 if (length <= 4) {
4031 desc_array->descriptor[0].address = address;
4032 desc_array->descriptor[0].length = length;
4033 desc_array->elements = 1;
4034 return (desc_array->elements);
4035 }
4036
4037 safe_terminator =
4038 (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
4039
4040 /* If it does not fall between 0x1 and 0x4 or 0x9 and 0xC then return */
4041 if (safe_terminator == 0 ||
4042 (safe_terminator > 4 && safe_terminator < 9) ||
4043 (safe_terminator > 0xC && safe_terminator <= 0xF)) {
4044 desc_array->descriptor[0].address = address;
4045 desc_array->descriptor[0].length = length;
4046 desc_array->elements = 1;
4047 return (desc_array->elements);
4048 }
4049
4050 desc_array->descriptor[0].address = address;
4051 desc_array->descriptor[0].length = length - 4;
4052 desc_array->descriptor[1].address = address + (length - 4);
4053 desc_array->descriptor[1].length = 4;
4054 desc_array->elements = 2;
4055 return (desc_array->elements);
4056 }
4057
4058 static void
4059 em_update_stats(struct adapter *adapter)
4060 {
4061 struct ifnet *ifp = &adapter->arpcom.ac_if;
4062
4063 if (adapter->hw.phy.media_type == e1000_media_type_copper ||
4064 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4065 adapter->stats.symerrs +=
4066 E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4067 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4068 }
4069 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4070 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4071 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4072 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4073
4074 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4075 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4076 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4077 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4078 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4079 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4080 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4081 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4082 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4083 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4084 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4085 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4086 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4087 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4088 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4089 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4090 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4091 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4092 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4093 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4094
4095 /* For the 64-bit byte counters the low dword must be read first.
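 * (A full 64-bit accumulation would look like this sketch:
 * lo = E1000_READ_REG(hw, E1000_GORCL);
 * hi = E1000_READ_REG(hw, E1000_GORCH);
 * gorc += ((uint64_t)hi << 32) | lo;
 * whereas the code below only accumulates the high-dword reads.)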
*/ 4096 /* Both registers clear on the read of the high dword */ 4097 4098 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH); 4099 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH); 4100 4101 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); 4102 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); 4103 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); 4104 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); 4105 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); 4106 4107 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); 4108 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); 4109 4110 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); 4111 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); 4112 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); 4113 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); 4114 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); 4115 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); 4116 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); 4117 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); 4118 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); 4119 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); 4120 4121 if (adapter->hw.mac.type >= e1000_82543) { 4122 adapter->stats.algnerrc += 4123 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); 4124 adapter->stats.rxerrc += 4125 E1000_READ_REG(&adapter->hw, E1000_RXERRC); 4126 adapter->stats.tncrs += 4127 E1000_READ_REG(&adapter->hw, E1000_TNCRS); 4128 adapter->stats.cexterr += 4129 E1000_READ_REG(&adapter->hw, E1000_CEXTERR); 4130 adapter->stats.tsctc += 4131 E1000_READ_REG(&adapter->hw, E1000_TSCTC); 4132 adapter->stats.tsctfc += 4133 E1000_READ_REG(&adapter->hw, E1000_TSCTFC); 4134 } 4135 4136 IFNET_STAT_SET(ifp, collisions, adapter->stats.colc); 4137 4138 /* Rx Errors */ 4139 IFNET_STAT_SET(ifp, ierrors, 4140 adapter->dropped_pkts + adapter->stats.rxerrc + 4141 adapter->stats.crcerrs + adapter->stats.algnerrc + 4142 adapter->stats.ruc + adapter->stats.roc + 4143 adapter->stats.mpc + adapter->stats.cexterr); 4144 4145 /* Tx Errors */ 4146 IFNET_STAT_SET(ifp, oerrors, 4147 adapter->stats.ecol + adapter->stats.latecol + 4148 adapter->watchdog_events); 4149 } 4150 4151 static void 4152 em_print_debug_info(struct adapter *adapter) 4153 { 4154 device_t dev = adapter->dev; 4155 uint8_t *hw_addr = adapter->hw.hw_addr; 4156 4157 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 4158 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 4159 E1000_READ_REG(&adapter->hw, E1000_CTRL), 4160 E1000_READ_REG(&adapter->hw, E1000_RCTL)); 4161 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 4162 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\ 4163 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) ); 4164 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 4165 adapter->hw.fc.high_water, 4166 adapter->hw.fc.low_water); 4167 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 4168 E1000_READ_REG(&adapter->hw, E1000_TIDV), 4169 E1000_READ_REG(&adapter->hw, E1000_TADV)); 4170 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 4171 E1000_READ_REG(&adapter->hw, E1000_RDTR), 4172 E1000_READ_REG(&adapter->hw, E1000_RADV)); 4173 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = 
%lld\n", 4174 (long long)adapter->tx_fifo_wrk_cnt, 4175 (long long)adapter->tx_fifo_reset_cnt); 4176 device_printf(dev, "hw tdh = %d, hw tdt = %d\n", 4177 E1000_READ_REG(&adapter->hw, E1000_TDH(0)), 4178 E1000_READ_REG(&adapter->hw, E1000_TDT(0))); 4179 device_printf(dev, "hw rdh = %d, hw rdt = %d\n", 4180 E1000_READ_REG(&adapter->hw, E1000_RDH(0)), 4181 E1000_READ_REG(&adapter->hw, E1000_RDT(0))); 4182 device_printf(dev, "Num Tx descriptors avail = %d\n", 4183 adapter->num_tx_desc_avail); 4184 device_printf(dev, "Tx Descriptors not avail1 = %ld\n", 4185 adapter->no_tx_desc_avail1); 4186 device_printf(dev, "Tx Descriptors not avail2 = %ld\n", 4187 adapter->no_tx_desc_avail2); 4188 device_printf(dev, "Std mbuf failed = %ld\n", 4189 adapter->mbuf_alloc_failed); 4190 device_printf(dev, "Std mbuf cluster failed = %ld\n", 4191 adapter->mbuf_cluster_failed); 4192 device_printf(dev, "Driver dropped packets = %ld\n", 4193 adapter->dropped_pkts); 4194 device_printf(dev, "Driver tx dma failure in encap = %ld\n", 4195 adapter->no_tx_dma_setup); 4196 } 4197 4198 static void 4199 em_print_hw_stats(struct adapter *adapter) 4200 { 4201 device_t dev = adapter->dev; 4202 4203 device_printf(dev, "Excessive collisions = %lld\n", 4204 (long long)adapter->stats.ecol); 4205 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 4206 device_printf(dev, "Symbol errors = %lld\n", 4207 (long long)adapter->stats.symerrs); 4208 #endif 4209 device_printf(dev, "Sequence errors = %lld\n", 4210 (long long)adapter->stats.sec); 4211 device_printf(dev, "Defer count = %lld\n", 4212 (long long)adapter->stats.dc); 4213 device_printf(dev, "Missed Packets = %lld\n", 4214 (long long)adapter->stats.mpc); 4215 device_printf(dev, "Receive No Buffers = %lld\n", 4216 (long long)adapter->stats.rnbc); 4217 /* RLEC is inaccurate on some hardware, calculate our own. 
*/
4218 device_printf(dev, "Receive Length Errors = %lld\n",
4219 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
4220 device_printf(dev, "Receive errors = %lld\n",
4221 (long long)adapter->stats.rxerrc);
4222 device_printf(dev, "Crc errors = %lld\n",
4223 (long long)adapter->stats.crcerrs);
4224 device_printf(dev, "Alignment errors = %lld\n",
4225 (long long)adapter->stats.algnerrc);
4226 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
4227 (long long)adapter->stats.cexterr);
4228 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
4229 device_printf(dev, "watchdog timeouts = %ld\n",
4230 adapter->watchdog_events);
4231 device_printf(dev, "XON Rcvd = %lld\n",
4232 (long long)adapter->stats.xonrxc);
4233 device_printf(dev, "XON Xmtd = %lld\n",
4234 (long long)adapter->stats.xontxc);
4235 device_printf(dev, "XOFF Rcvd = %lld\n",
4236 (long long)adapter->stats.xoffrxc);
4237 device_printf(dev, "XOFF Xmtd = %lld\n",
4238 (long long)adapter->stats.xofftxc);
4239 device_printf(dev, "Good Packets Rcvd = %lld\n",
4240 (long long)adapter->stats.gprc);
4241 device_printf(dev, "Good Packets Xmtd = %lld\n",
4242 (long long)adapter->stats.gptc);
4243 }
4244
4245 static void
4246 em_print_nvm_info(struct adapter *adapter)
4247 {
4248 uint16_t eeprom_data;
4249 int i, j, row = 0;
4250
4251 /* It's a bit crude, but it gets the job done */
4252 kprintf("\nInterface EEPROM Dump:\n");
4253 kprintf("Offset\n0x0000 ");
4254 for (i = 0, j = 0; i < 32; i++, j++) {
4255 if (j == 8) { /* Make the offset block */
4256 j = 0; ++row;
4257 kprintf("\n0x00%x0 ",row);
4258 }
4259 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4260 kprintf("%04x ", eeprom_data);
4261 }
4262 kprintf("\n");
4263 }
4264
4265 static int
4266 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4267 {
4268 struct adapter *adapter;
4269 struct ifnet *ifp;
4270 int error, result;
4271
4272 result = -1;
4273 error = sysctl_handle_int(oidp, &result, 0, req);
4274 if (error || !req->newptr)
4275 return (error);
4276
4277 adapter = (struct adapter *)arg1;
4278 ifp = &adapter->arpcom.ac_if;
4279
4280 lwkt_serialize_enter(ifp->if_serializer);
4281
4282 if (result == 1)
4283 em_print_debug_info(adapter);
4284
4285 /*
4286 * This value will cause a hex dump of the
4287 * first 32 16-bit words of the EEPROM to
4288 * the screen.
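 *
 * Usage sketch (unit number assumed for illustration):
 *   sysctl dev.em.0.debug=1    - dump adapter/register debug info
 *   sysctl dev.em.0.debug=2    - hex dump the first 32 EEPROM words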
4289 */
4290 if (result == 2)
4291 em_print_nvm_info(adapter);
4292
4293 lwkt_serialize_exit(ifp->if_serializer);
4294
4295 return (error);
4296 }
4297
4298 static int
4299 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
4300 {
4301 int error, result;
4302
4303 result = -1;
4304 error = sysctl_handle_int(oidp, &result, 0, req);
4305 if (error || !req->newptr)
4306 return (error);
4307
4308 if (result == 1) {
4309 struct adapter *adapter = (struct adapter *)arg1;
4310 struct ifnet *ifp = &adapter->arpcom.ac_if;
4311
4312 lwkt_serialize_enter(ifp->if_serializer);
4313 em_print_hw_stats(adapter);
4314 lwkt_serialize_exit(ifp->if_serializer);
4315 }
4316 return (error);
4317 }
4318
4319 static void
4320 em_add_sysctl(struct adapter *adapter)
4321 {
4322 struct sysctl_ctx_list *ctx;
4323 struct sysctl_oid *tree;
4324
4325 ctx = device_get_sysctl_ctx(adapter->dev);
4326 tree = device_get_sysctl_tree(adapter->dev);
4327 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
4328 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
4329 em_sysctl_debug_info, "I", "Debug Information");
4330
4331 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
4332 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
4333 em_sysctl_stats, "I", "Statistics");
4334
4335 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
4336 OID_AUTO, "rxd", CTLFLAG_RD,
4337 &adapter->num_rx_desc, 0, NULL);
4338 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
4339 OID_AUTO, "txd", CTLFLAG_RD,
4340 &adapter->num_tx_desc, 0, NULL);
4341
4342 if (adapter->hw.mac.type >= e1000_82540) {
4343 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
4344 OID_AUTO, "int_throttle_ceil",
4345 CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
4346 em_sysctl_int_throttle, "I",
4347 "interrupt throttling rate");
4348 }
4349 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
4350 OID_AUTO, "int_tx_nsegs",
4351 CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
4352 em_sysctl_int_tx_nsegs, "I",
4353 "# segments per TX interrupt");
4354 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
4355 OID_AUTO, "wreg_tx_nsegs", CTLFLAG_RW,
4356 &adapter->tx_wreg_nsegs, 0,
4357 "# segments before write to hardware register");
4358 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_nmbuf",
4359 CTLFLAG_RD, &adapter->tx_nmbuf, 0, "# of pending TX mbufs");
4360 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_gc",
4361 CTLFLAG_RW, &adapter->tx_gc, "# of TX GC");
4362 }
4363
4364 static int
4365 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
4366 {
4367 struct adapter *adapter = (void *)arg1;
4368 struct ifnet *ifp = &adapter->arpcom.ac_if;
4369 int error, throttle;
4370
4371 throttle = adapter->int_throttle_ceil;
4372 error = sysctl_handle_int(oidp, &throttle, 0, req);
4373 if (error || req->newptr == NULL)
4374 return error;
4375 if (throttle < 0 || throttle > 1000000000 / 256)
4376 return EINVAL;
4377
4378 if (throttle) {
4379 /*
4380 * Set the interrupt throttling rate in 256ns increments, then
4381 * recalculate the sysctl value so it reflects the exact frequency.
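 *
 * Worked example (illustrative): requesting 10000 ints/s yields
 * 1000000000 / 256 / 10000 = 390 ITR units; converting back gives
 * 1000000000 / 256 / 390 = 10016 ints/s, which is the ceiling the
 * sysctl will report afterwards.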
4382 */
4383 throttle = 1000000000 / 256 / throttle;
4384
4385 /* Upper 16bits of ITR are reserved and should be zero */
4386 if (throttle & 0xffff0000)
4387 return EINVAL;
4388 }
4389
4390 lwkt_serialize_enter(ifp->if_serializer);
4391
4392 if (throttle)
4393 adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
4394 else
4395 adapter->int_throttle_ceil = 0;
4396
4397 if (ifp->if_flags & IFF_RUNNING)
4398 em_set_itr(adapter, throttle);
4399
4400 lwkt_serialize_exit(ifp->if_serializer);
4401
4402 if (bootverbose) {
4403 if_printf(ifp, "Interrupt moderation set to %d/sec\n",
4404 adapter->int_throttle_ceil);
4405 }
4406 return 0;
4407 }
4408
4409 static int
4410 em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS)
4411 {
4412 struct adapter *adapter = (void *)arg1;
4413 struct ifnet *ifp = &adapter->arpcom.ac_if;
4414 int error, segs;
4415
4416 segs = adapter->tx_int_nsegs;
4417 error = sysctl_handle_int(oidp, &segs, 0, req);
4418 if (error || req->newptr == NULL)
4419 return error;
4420 if (segs <= 0)
4421 return EINVAL;
4422
4423 lwkt_serialize_enter(ifp->if_serializer);
4424
4425 /*
4426 * Don't allow int_tx_nsegs to become:
4427 * o Less than oact_tx_desc
4428 * o So large that no TX descriptor would cause a TX interrupt
4429 * to be generated (OACTIVE would never recover)
4430 * o So small that it would cause tx_dd[] overflow
4431 */
4432 if (segs < adapter->oact_tx_desc ||
4433 segs >= adapter->num_tx_desc - adapter->oact_tx_desc ||
4434 segs < adapter->num_tx_desc / EM_TXDD_SAFE) {
4435 error = EINVAL;
4436 } else {
4437 error = 0;
4438 adapter->tx_int_nsegs = segs;
4439 }
4440
4441 lwkt_serialize_exit(ifp->if_serializer);
4442
4443 return error;
4444 }
4445
4446 static void
4447 em_set_itr(struct adapter *adapter, uint32_t itr)
4448 {
4449 E1000_WRITE_REG(&adapter->hw, E1000_ITR, itr);
4450 if (adapter->hw.mac.type == e1000_82574) {
4451 int i;
4452
4453 /*
4454 * When using MSIX interrupts we need to
4455 * throttle using the EITR register
4456 */
4457 for (i = 0; i < 4; ++i) {
4458 E1000_WRITE_REG(&adapter->hw,
4459 E1000_EITR_82574(i), itr);
4460 }
4461 }
4462 }
4463
4464 static void
4465 em_disable_aspm(struct adapter *adapter)
4466 {
4467 uint16_t link_cap, link_ctrl, disable;
4468 uint8_t pcie_ptr, reg;
4469 device_t dev = adapter->dev;
4470
4471 switch (adapter->hw.mac.type) {
4472 case e1000_82571:
4473 case e1000_82572:
4474 case e1000_82573:
4475 /*
4476 * 82573 specification update
4477 * errata #8 disable L0s
4478 * errata #41 disable L1
4479 *
4480 * 82571/82572 specification update
4481 * errata #13 disable L1
4482 * errata #68 disable L0s
4483 */
4484 disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
4485 break;
4486
4487 case e1000_82574:
4488 case e1000_82583:
4489 /*
4490 * 82574 specification update errata #20
4491 * 82583 specification update errata #9
4492 *
4493 * There is no need to disable L1
4494 */
4495 disable = PCIEM_LNKCTL_ASPM_L0S;
4496 break;
4497
4498 default:
4499 return;
4500 }
4501
4502 pcie_ptr = pci_get_pciecap_ptr(dev);
4503 if (pcie_ptr == 0)
4504 return;
4505
4506 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
4507 if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
4508 return;
4509
4510 if (bootverbose) {
4511 if_printf(&adapter->arpcom.ac_if,
4512 "disable ASPM %#02x\n", disable);
4513 }
4514
4515 reg = pcie_ptr + PCIER_LINKCTRL;
4516 link_ctrl = pci_read_config(dev, reg, 2);
4517 link_ctrl &= ~disable;
4518 pci_write_config(dev, reg, link_ctrl, 2);
4519 }
4520
4521 static int
4522 em_tso_pullup(struct adapter *adapter, struct mbuf **mp)
4523 {
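/*
 * Pull the Ethernet/IP/TCP headers (plus 4 extra bytes when
 * EM_FLAG_TSO_PULLEX is set) into the first mbuf so they can be
 * dereferenced directly below; ip_len is then zeroed since the
 * hardware computes the per-segment lengths during TSO.
 */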
4524 int iphlen, hoff, thoff, ex = 0;
4525 struct mbuf *m;
4526 struct ip *ip;
4527
4528 m = *mp;
4529 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
4530
4531 iphlen = m->m_pkthdr.csum_iphlen;
4532 thoff = m->m_pkthdr.csum_thlen;
4533 hoff = m->m_pkthdr.csum_lhlen;
4534
4535 KASSERT(iphlen > 0, ("invalid ip hlen"));
4536 KASSERT(thoff > 0, ("invalid tcp hlen"));
4537 KASSERT(hoff > 0, ("invalid ether hlen"));
4538
4539 if (adapter->flags & EM_FLAG_TSO_PULLEX)
4540 ex = 4;
4541
4542 if (m->m_len < hoff + iphlen + thoff + ex) {
4543 m = m_pullup(m, hoff + iphlen + thoff + ex);
4544 if (m == NULL) {
4545 *mp = NULL;
4546 return ENOBUFS;
4547 }
4548 *mp = m;
4549 }
4550 ip = mtodoff(m, struct ip *, hoff);
4551 ip->ip_len = 0;
4552
4553 return 0;
4554 }
4555
4556 static int
4557 em_tso_setup(struct adapter *adapter, struct mbuf *mp,
4558 uint32_t *txd_upper, uint32_t *txd_lower)
4559 {
4560 struct e1000_context_desc *TXD;
4561 int hoff, iphlen, thoff, hlen;
4562 int mss, pktlen, curr_txd;
4563
4564 iphlen = mp->m_pkthdr.csum_iphlen;
4565 thoff = mp->m_pkthdr.csum_thlen;
4566 hoff = mp->m_pkthdr.csum_lhlen;
4567 mss = mp->m_pkthdr.tso_segsz;
4568 pktlen = mp->m_pkthdr.len;
4569
4570 if (adapter->csum_flags == CSUM_TSO &&
4571 adapter->csum_iphlen == iphlen &&
4572 adapter->csum_lhlen == hoff &&
4573 adapter->csum_thlen == thoff &&
4574 adapter->csum_mss == mss &&
4575 adapter->csum_pktlen == pktlen) {
4576 *txd_upper = adapter->csum_txd_upper;
4577 *txd_lower = adapter->csum_txd_lower;
4578 return 0;
4579 }
4580 hlen = hoff + iphlen + thoff;
4581
4582 /*
4583 * Setup a new TSO context.
4584 */
4585
4586 curr_txd = adapter->next_avail_tx_desc;
4587 TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd];
4588
4589 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
4590 E1000_TXD_DTYP_D | /* Data descr type */
4591 E1000_TXD_CMD_TSE; /* Do TSE on this packet */
4592
4593 /* IP and/or TCP header checksum calculation and insertion. */
4594 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
4595
4596 /*
4597 * Start offset for header checksum calculation.
4598 * End offset for header checksum calculation.
4599 * Offset of the place to put the checksum.
4600 */
4601 TXD->lower_setup.ip_fields.ipcss = hoff;
4602 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
4603 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);
4604
4605 /*
4606 * Start offset for payload checksum calculation.
4607 * End offset for payload checksum calculation.
4608 * Offset of the place to put the checksum.
4609 */
4610 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
4611 TXD->upper_setup.tcp_fields.tucse = 0;
4612 TXD->upper_setup.tcp_fields.tucso =
4613 hoff + iphlen + offsetof(struct tcphdr, th_sum);
4614
4615 /*
4616 * Payload size per packet w/o any headers.
4617 * Length of all headers up to payload.
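 *
 * Worked example (typical untagged Ethernet/IPv4/TCP, illustrative):
 * hoff = 14, iphlen = 20, thoff = 20, so hlen = 54; the context then
 * holds ipcss = 14, ipcse = 33, ipcso = 24, tucss = 34, tucso = 50,
 * and with mss = 1460 each segment carries at most 1460 payload
 * bytes behind a rebuilt 54-byte header.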
4618 */
4619 TXD->tcp_seg_setup.fields.mss = htole16(mss);
4620 TXD->tcp_seg_setup.fields.hdr_len = hlen;
4621 TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
4622 E1000_TXD_CMD_DEXT | /* Extended descr */
4623 E1000_TXD_CMD_TSE | /* TSE context */
4624 E1000_TXD_CMD_IP | /* Do IP csum */
4625 E1000_TXD_CMD_TCP | /* Do TCP checksum */
4626 (pktlen - hlen)); /* Total len */
4627
4628 /* Save the information for this TSO context */
4629 adapter->csum_flags = CSUM_TSO;
4630 adapter->csum_lhlen = hoff;
4631 adapter->csum_iphlen = iphlen;
4632 adapter->csum_thlen = thoff;
4633 adapter->csum_mss = mss;
4634 adapter->csum_pktlen = pktlen;
4635 adapter->csum_txd_upper = *txd_upper;
4636 adapter->csum_txd_lower = *txd_lower;
4637
4638 if (++curr_txd == adapter->num_tx_desc)
4639 curr_txd = 0;
4640
4641 KKASSERT(adapter->num_tx_desc_avail > 0);
4642 adapter->num_tx_desc_avail--;
4643
4644 adapter->next_avail_tx_desc = curr_txd;
4645 return 1;
4646 }
4647
4648 /*
4649 * Remove all descriptors from the TX ring.
4650 *
4651 * We want to clear all pending descriptors from the TX ring. Zeroing
4652 * happens when the HW reads the regs. We assign the ring itself as
4653 * the data of the next descriptor. We don't care about the data since
4654 * we are about to reset the HW.
4655 */
4656 static void
4657 em_flush_tx_ring(struct adapter *adapter)
4658 {
4659 struct e1000_hw *hw = &adapter->hw;
4660 struct e1000_tx_desc *txd;
4661 uint32_t tctl;
4662
4663 tctl = E1000_READ_REG(hw, E1000_TCTL);
4664 E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
4665
4666 txd = &adapter->tx_desc_base[adapter->next_avail_tx_desc++];
4667 if (adapter->next_avail_tx_desc == adapter->num_tx_desc)
4668 adapter->next_avail_tx_desc = 0;
4669
4670 /* Just use the ring as a dummy buffer addr */
4671 txd->buffer_addr = adapter->txdma.dma_paddr;
4672 txd->lower.data = htole32(E1000_TXD_CMD_IFCS | 512);
4673 txd->upper.data = 0;
4674
4675 E1000_WRITE_REG(hw, E1000_TDT(0), adapter->next_avail_tx_desc);
4676 usec_delay(250);
4677 }
4678
4679 /*
4680 * Remove all descriptors from the RX ring.
4681 *
4682 * Mark all descriptors in the RX ring as consumed and disable the RX ring.
4683 */
4684 static void
4685 em_flush_rx_ring(struct adapter *adapter)
4686 {
4687 struct e1000_hw *hw = &adapter->hw;
4688 uint32_t rctl, rxdctl;
4689
4690 rctl = E1000_READ_REG(hw, E1000_RCTL);
4691 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
4692 E1000_WRITE_FLUSH(hw);
4693 usec_delay(150);
4694
4695 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
4696 /* Zero the lower 14 bits (prefetch and host thresholds) */
4697 rxdctl &= 0xffffc000;
4698 /*
4699 * Update thresholds: prefetch threshold to 31, host threshold to 1
4700 * and make sure the granularity is "descriptors" and not "cache
4701 * lines".
4702 */
4703 rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
4704 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl);
4705
4706 /* Momentarily enable the RX ring for the changes to take effect */
4707 E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
4708 E1000_WRITE_FLUSH(hw);
4709 usec_delay(150);
4710 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
4711 }
4712
4713 /*
4714 * Remove all descriptors from the descriptor rings.
4715 *
4716 * In i219, the descriptor rings must be emptied before resetting the HW
4717 * or before changing the device state to D3 during runtime (runtime PM).
4718 *
4719 * Failure to do this will cause the HW to enter a unit hang state which
4720 * can only be released by PCI reset on the device.
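 *
 * The flush below therefore runs in three steps: disable the MULR
 * fix in FEXTNVM11, flush the TX ring if the PCI config hang-state
 * word flags it (and the ring is non-empty), then re-read the hang
 * state and flush the RX ring if it is still flagged.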
4721 */
4722 static void
4723 em_flush_txrx_ring(struct adapter *adapter)
4724 {
4725 struct e1000_hw *hw = &adapter->hw;
4726 device_t dev = adapter->dev;
4727 uint16_t hang_state;
4728 uint32_t fext_nvm11;
4729
4730 /*
4731 * First, disable MULR fix in FEXTNVM11.
4732 */
4733 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
4734 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
4735 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
4736
4737 /*
4738 * Do nothing if we're not in a faulty state, or if the queue is
4739 * empty.
4740 */
4741 hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2);
4742 if ((hang_state & FLUSH_DESC_REQUIRED) &&
4743 E1000_READ_REG(hw, E1000_TDLEN(0)))
4744 em_flush_tx_ring(adapter);
4745
4746 /*
4747 * Recheck, maybe the fault is caused by the RX ring.
4748 */
4749 hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2);
4750 if (hang_state & FLUSH_DESC_REQUIRED)
4751 em_flush_rx_ring(adapter);
4752 }
4753