/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),
	EMX_DEVICE(PCH_I218_LM2),
	EMX_DEVICE(PCH_I218_V2),
	EMX_DEVICE(PCH_I218_LM3),
	EMX_DEVICE(PCH_I218_V3),
	EMX_DEVICE(PCH_SPT_I219_LM),
	EMX_DEVICE(PCH_SPT_I219_V),
	EMX_DEVICE(PCH_SPT_I219_LM2),
	EMX_DEVICE(PCH_SPT_I219_V2),
	EMX_DEVICE(PCH_LBG_I219_LM3),
	EMX_DEVICE(PCH_SPT_I219_LM4),
	EMX_DEVICE(PCH_SPT_I219_V4),
	EMX_DEVICE(PCH_SPT_I219_LM5),
	EMX_DEVICE(PCH_SPT_I219_V5),
	EMX_DEVICE(PCH_CNP_I219_LM6),
	EMX_DEVICE(PCH_CNP_I219_V6),
	EMX_DEVICE(PCH_CNP_I219_LM7),
	EMX_DEVICE(PCH_CNP_I219_V7),
	EMX_DEVICE(PCH_ICP_I219_LM8),
	EMX_DEVICE(PCH_ICP_I219_V8),
	EMX_DEVICE(PCH_ICP_I219_LM9),
	EMX_DEVICE(PCH_ICP_I219_V9),
	EMX_DEVICE(PCH_CMP_I219_LM10),
	EMX_DEVICE(PCH_CMP_I219_V10),
	EMX_DEVICE(PCH_CMP_I219_LM11),
	EMX_DEVICE(PCH_CMP_I219_V11),
	EMX_DEVICE(PCH_CMP_I219_LM12),
	EMX_DEVICE(PCH_CMP_I219_V12),
	EMX_DEVICE(PCH_TGP_I219_LM13),
	EMX_DEVICE(PCH_TGP_I219_V13),
	EMX_DEVICE(PCH_TGP_I219_LM14),
	EMX_DEVICE(PCH_TGP_I219_V14),
	EMX_DEVICE(PCH_TGP_I219_LM15),
	EMX_DEVICE(PCH_TGP_I219_V15),
	EMX_DEVICE(PCH_ADP_I219_LM16),
	EMX_DEVICE(PCH_ADP_I219_V16),
	EMX_DEVICE(PCH_ADP_I219_LM17),
	EMX_DEVICE(PCH_ADP_I219_V17),
	EMX_DEVICE(PCH_MTP_I219_LM18),
	EMX_DEVICE(PCH_MTP_I219_V18),
	EMX_DEVICE(PCH_MTP_I219_LM19),
	EMX_DEVICE(PCH_MTP_I219_V19),

	/* required last entry */
	EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *, boolean_t);
static void	emx_txgc_timer(void *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const
		    uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);
static void	emx_flush_tx_ring(struct emx_softc *);
static void	emx_flush_rx_ring(struct emx_softc *);
static void	emx_flush_txrx_ring(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

static char	emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));

/* Global used in WOL setup with multiport cards */
static
int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_free_txbuf(struct emx_txdata *tdata, struct emx_txbuf *tx_buffer)
{

	KKASSERT(tx_buffer->m_head != NULL);
	KKASSERT(tdata->tx_nmbuf > 0);
	tdata->tx_nmbuf--;

	bus_dmamap_unload(tdata->txtag, tx_buffer->map);
	m_freem(tx_buffer->m_head);
	tx_buffer->m_head = NULL;
}

static __inline void
emx_tx_intr(struct emx_txdata *tdata)
{

	emx_txeof(tdata);
	if (!ifsq_is_empty(tdata->ifsq))
		ifsq_devstart(tdata->ifsq);
}

static __inline void
emx_try_txgc(struct emx_txdata *tdata, int16_t dec)
{

	if (tdata->tx_running > 0) {
		tdata->tx_running -= dec;
		if (tdata->tx_running <= 0 && tdata->tx_nmbuf &&
		    tdata->num_tx_desc_avail < tdata->num_tx_desc &&
		    tdata->num_tx_desc_avail + tdata->tx_intr_nsegs >
		    tdata->num_tx_desc)
			emx_tx_collect(tdata, TRUE);
	}
}

static void
emx_txgc_timer(void *xtdata)
{
	struct emx_txdata *tdata = xtdata;
	struct ifnet *ifp = &tdata->sc->arpcom.ac_if;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP))
		return;

	if (!lwkt_serialize_try(&tdata->tx_serialize))
		goto done;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP)) {
		lwkt_serialize_exit(&tdata->tx_serialize);
		return;
	}
	emx_try_txgc(tdata, EMX_TX_RUNNING_DEC);

	lwkt_serialize_exit(&tdata->tx_serialize);
done:
	callout_reset(&tdata->tx_gc_timer, 1, emx_txgc_timer, tdata);
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
	    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable;
	int tx_ring_max, ring_cnt;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
	char flowctrl[IFM_ETH_FC_STRLEN];

	/*
	 * Setup RX rings
	 */
	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	/*
	 * Setup TX rings
	 */
	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
		callout_init_mp(&sc->tx_data[i].tx_gc_timer);
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;
again:
	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: %s\n",
		    sc->intr_type == PCI_INTR_TYPE_MSI ? "MSI" : "legacy intr");
		if (!msi_enable) {
			/* Retry with MSI. */
			msi_enable = 1;
			sc->flags &= ~EMX_FLAG_SHARED_INTR;
			goto again;
		}
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	} else if (sc->hw.mac.type >= e1000_pch_spt) {
		/*
		 * On the newer SPT devices the flash is not a separate
		 * BAR; rather it is also in BAR0, so use the same tag
		 * and an offset handle for the FLASH read/write macros
		 * in the shared code.
		 */
		sc->osdep.flash_bus_space_tag = sc->osdep.mem_bus_space_tag;
		sc->osdep.flash_bus_space_handle =
		    sc->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/*
		 * Recalculate the tunable value to get the exact frequency.
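		 *
		 * NOTE: The ITR interval is programmed in units of 256ns,
		 * so a rate of N interrupts/s corresponds to a register
		 * value of 10^9 / 256 / N; the conversion below is thus
		 * its own inverse.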
		 */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}

	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/*
	 * Calculate # of RX/TX rings
	 */
	ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, EMX_NRX_RING);

	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan ||
	    sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_pch_spt ||
	    sc->hw.mac.type == e1000_pch_cnp ||
	    sc->hw.mac.type == e1000_82574)
		tx_ring_max = EMX_NTX_RING;
	ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, tx_ring_max);

	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);
	sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state, this is important in reading the
	 * nvm and mac from that.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		apme_mask = E1000_WUC_APME;
		sc->flags |= EMX_FLAG_HAS_AMT;
		eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC);
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port sc, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

	/* Initialize # of TX rings to use. */
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    emx_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree; this must be after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		error = emx_reset(sc);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(&sc->arpcom.ac_if);
			goto fail;
		}
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}
	return (0);
fail:
	emx_detach(dev);
	return (error);
}

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
		    sc->flash);
	}

	emx_dma_free(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	if (sc->rx_rmap != NULL)
		if_ringmap_free(sc->rx_rmap);
	if (sc->tx_rmap != NULL)
		if_ringmap_free(sc->tx_rmap);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;
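
	/*
	 * Bring the chip back up and re-acquire the management
	 * handoff before restarting the TX queues that were
	 * stopped by emx_suspend().
	 */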
	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Do we at least have the minimal number of descriptors? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata, FALSE);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata, FALSE);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time would make the opackets
		 * statistics vastly inaccurate; we do the opackets increment
		 * now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifsq_watchdog_set_count(&tdata->tx_watchdog, EMX_TX_TIMEOUT);
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
	tdata->tx_running = EMX_TX_RUNNING;
}

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_pch_cnp:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
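	 *
	 * The TDH == TDT comparison below distinguishes a ring that
	 * is simply idle (the hardware has consumed everything we
	 * queued) from one that is stuck and really needs a reset.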
	 */

	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		ifsq_watchdog_set_count(&tdata->tx_watchdog, 0);
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		ifsq_watchdog_set_count(&tdata->tx_watchdog, EMX_TX_TIMEOUT);
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest mac address; the user can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571, RAR[0] may be overwritten when the other
	 * port is reset, so we make a duplicate in RAR[14] for that
	 * eventuality; this assures the interface continues to
	 * function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX emx_stop()?
		 */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	/* Reset hardware counters */
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling)
		emx_disable_intr(sc);
	else
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_clr_oactive(tdata->ifsq);
		ifsq_watchdog_start(&tdata->tx_watchdog);
		if (!polling) {
			callout_reset_bycpu(&tdata->tx_gc_timer, 1,
			    emx_txgc_timer, tdata, ifsq_get_cpuid(tdata->ifsq));
		}
	}
	callout_reset(&sc->timer, hz, emx_timer, sc);
}

static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle.  The ICR register
	 * reports all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.
	 * I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(&sc->rx_data[i], -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			struct emx_txdata *tdata = &sc->tx_data[0];

			lwkt_serialize_enter(&tdata->tx_serialize);
			emx_tx_intr(tdata);
			lwkt_serialize_exit(&tdata->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	if (ifmr->ifm_active & IFM_FDX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}

static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex
			    = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);

	return (0);
}

static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = emx_tso_pullup(tdata, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index, this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = tdata->next_avail_tx_desc;
	tx_buffer = &tdata->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	tdata->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/*
		 * Set the vlan id.
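		 * The tag lands in the upper 16 bits of txd_upper,
		 * i.e. the "special" field of the legacy TX descriptor.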
		 */
		txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

	i = tdata->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &tdata->tx_buf[i];
		ctxd = &tdata->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == tdata->num_tx_desc)
			i = 0;
	}

	tdata->next_avail_tx_desc = i;

	KKASSERT(tdata->num_tx_desc_avail > nsegs);
	tdata->num_tx_desc_avail -= nsegs;
	tdata->tx_nmbuf++;

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
		tdata->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_intr_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		tdata->tx_dd[tdata->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
		KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Defer TDT updating, until enough descriptors are setup
	 */
	*idx = i;

#ifdef EMX_TSS_DEBUG
	tdata->tx_pkts++;
#endif

	return (0);
}

static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;
	int mcnt = 0;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
	reg_rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);

	if (ifp->if_flags & IFF_ALLMULTI) {
		mcnt = EMX_MCAST_ADDR_MAX;
	} else {
		const struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == EMX_MCAST_ADDR_MAX)
				break;
			mcnt++;
		}
	}
	/* Don't disable if in MAX groups */
	if (mcnt < EMX_MCAST_ADDR_MAX)
		reg_rctl &= ~E1000_RCTL_MPE;

	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt ==
		    EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(&sc->main_serialize);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			if (hw->mac.type >= e1000_pch_spt)
				msec_delay(50);
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571EB/82572EI
		 */
		if (sc->link_speed != SPEED_1000 &&
		    (hw->mac.type == e1000_82571 ||
		     hw->mac.type == e1000_82572)) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));
			device_printf(dev, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    (sc->link_duplex == FULL_DUPLEX) ?
1999 "Full Duplex" : "Half Duplex", 2000 flowctrl); 2001 } 2002 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 2003 e1000_force_flowctrl(hw, sc->ifm_flowctrl); 2004 sc->link_active = 1; 2005 sc->smartspeed = 0; 2006 ifp->if_baudrate = sc->link_speed * 1000000; 2007 ifp->if_link_state = LINK_STATE_UP; 2008 if_link_state_change(ifp); 2009 } else if (!link_check && sc->link_active == 1) { 2010 ifp->if_baudrate = sc->link_speed = 0; 2011 sc->link_duplex = 0; 2012 if (bootverbose) 2013 device_printf(dev, "Link is Down\n"); 2014 sc->link_active = 0; 2015 ifp->if_link_state = LINK_STATE_DOWN; 2016 if_link_state_change(ifp); 2017 } 2018 } 2019 2020 static void 2021 emx_stop(struct emx_softc *sc) 2022 { 2023 struct ifnet *ifp = &sc->arpcom.ac_if; 2024 int i; 2025 2026 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2027 2028 emx_disable_intr(sc); 2029 2030 callout_stop(&sc->timer); 2031 2032 ifp->if_flags &= ~IFF_RUNNING; 2033 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2034 struct emx_txdata *tdata = &sc->tx_data[i]; 2035 2036 ifsq_clr_oactive(tdata->ifsq); 2037 ifsq_watchdog_stop(&tdata->tx_watchdog); 2038 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED; 2039 2040 tdata->tx_running = 0; 2041 callout_stop(&tdata->tx_gc_timer); 2042 } 2043 2044 /* I219 needs some special flushing to avoid hangs */ 2045 if (sc->hw.mac.type >= e1000_pch_spt) 2046 emx_flush_txrx_ring(sc); 2047 2048 /* 2049 * Disable multiple receive queues. 2050 * 2051 * NOTE: 2052 * We should disable multiple receive queues before 2053 * resetting the hardware. 2054 */ 2055 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0); 2056 2057 e1000_reset_hw(&sc->hw); 2058 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 2059 2060 for (i = 0; i < sc->tx_ring_cnt; ++i) 2061 emx_free_tx_ring(&sc->tx_data[i]); 2062 for (i = 0; i < sc->rx_ring_cnt; ++i) 2063 emx_free_rx_ring(&sc->rx_data[i]); 2064 } 2065 2066 static int 2067 emx_reset(struct emx_softc *sc) 2068 { 2069 device_t dev = sc->dev; 2070 uint16_t rx_buffer_size; 2071 uint32_t pba; 2072 2073 /* Set up smart power down as default off on newer adapters. */ 2074 if (!emx_smart_pwr_down && 2075 (sc->hw.mac.type == e1000_82571 || 2076 sc->hw.mac.type == e1000_82572)) { 2077 uint16_t phy_tmp = 0; 2078 2079 /* Speed up time to link by disabling smart power down. */ 2080 e1000_read_phy_reg(&sc->hw, 2081 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 2082 phy_tmp &= ~IGP02E1000_PM_SPD; 2083 e1000_write_phy_reg(&sc->hw, 2084 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 2085 } 2086 2087 /* 2088 * Packet Buffer Allocation (PBA) 2089 * Writing PBA sets the receive portion of the buffer 2090 * the remainder is used for the transmit buffer. 2091 */ 2092 switch (sc->hw.mac.type) { 2093 /* Total Packet Buffer on these is 48K */ 2094 case e1000_82571: 2095 case e1000_82572: 2096 case e1000_80003es2lan: 2097 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 2098 break; 2099 2100 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 2101 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 2102 break; 2103 2104 case e1000_82574: 2105 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 2106 break; 2107 2108 case e1000_pch_lpt: 2109 case e1000_pch_spt: 2110 case e1000_pch_cnp: 2111 pba = E1000_PBA_26K; 2112 break; 2113 2114 default: 2115 /* Devices before 82547 had a Packet Buffer of 64K. 
*/ 2116 if (sc->hw.mac.max_frame_size > 8192) 2117 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 2118 else 2119 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 2120 } 2121 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba); 2122 2123 /* 2124 * These parameters control the automatic generation (Tx) and 2125 * response (Rx) to Ethernet PAUSE frames. 2126 * - High water mark should allow for at least two frames to be 2127 * received after sending an XOFF. 2128 * - Low water mark works best when it is very near the high water mark. 2129 * This allows the receiver to restart by sending XON when it has 2130 * drained a bit. Here we use an arbitrary value of 1500 which will 2131 * restart after one full frame is pulled from the buffer. There 2132 * could be several smaller frames in the buffer and if so they will 2133 * not trigger the XON until their combined size reduces the buffer 2134 * by 1500 bytes. 2135 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 2136 */ 2137 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10; 2138 2139 sc->hw.fc.high_water = rx_buffer_size - 2140 roundup2(sc->hw.mac.max_frame_size, 1024); 2141 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500; 2142 2143 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME; 2144 sc->hw.fc.send_xon = TRUE; 2145 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl); 2146 2147 /* 2148 * Device specific overrides/settings 2149 */ 2150 if (sc->hw.mac.type == e1000_pch_lpt || 2151 sc->hw.mac.type == e1000_pch_spt || 2152 sc->hw.mac.type == e1000_pch_cnp) { 2153 sc->hw.fc.high_water = 0x5C20; 2154 sc->hw.fc.low_water = 0x5048; 2155 sc->hw.fc.pause_time = 0x0650; 2156 sc->hw.fc.refresh_time = 0x0400; 2157 /* Jumbos need adjusted PBA */ 2158 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) 2159 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12); 2160 else 2161 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26); 2162 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2163 sc->hw.fc.pause_time = 0xFFFF; 2164 } 2165 2166 /* I219 needs some special flushing to avoid hangs */ 2167 if (sc->hw.mac.type >= e1000_pch_spt) 2168 emx_flush_txrx_ring(sc); 2169 2170 /* Issue a global reset */ 2171 e1000_reset_hw(&sc->hw); 2172 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 2173 emx_disable_aspm(sc); 2174 2175 if (e1000_init_hw(&sc->hw) < 0) { 2176 device_printf(dev, "Hardware Initialization Failed\n"); 2177 return (EIO); 2178 } 2179 2180 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 2181 e1000_get_phy_info(&sc->hw); 2182 e1000_check_for_link(&sc->hw); 2183 2184 return (0); 2185 } 2186 2187 static void 2188 emx_setup_ifp(struct emx_softc *sc) 2189 { 2190 struct ifnet *ifp = &sc->arpcom.ac_if; 2191 int i; 2192 2193 if_initname(ifp, device_get_name(sc->dev), 2194 device_get_unit(sc->dev)); 2195 ifp->if_softc = sc; 2196 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2197 ifp->if_init = emx_init; 2198 ifp->if_ioctl = emx_ioctl; 2199 ifp->if_start = emx_start; 2200 #ifdef IFPOLL_ENABLE 2201 ifp->if_npoll = emx_npoll; 2202 #endif 2203 ifp->if_serialize = emx_serialize; 2204 ifp->if_deserialize = emx_deserialize; 2205 ifp->if_tryserialize = emx_tryserialize; 2206 #ifdef INVARIANTS 2207 ifp->if_serialize_assert = emx_serialize_assert; 2208 #endif 2209 2210 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc; 2211 2212 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1); 2213 ifq_set_ready(&ifp->if_snd); 2214 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 2215 2216 ifp->if_mapsubq = ifq_mapsubq_modulo; 2217 ifq_set_subq_divisor(&ifp->if_snd,
1); 2218 2219 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2220 2221 ifp->if_capabilities = IFCAP_HWCSUM | 2222 IFCAP_VLAN_HWTAGGING | 2223 IFCAP_VLAN_MTU | 2224 IFCAP_TSO; 2225 if (sc->rx_ring_cnt > 1) 2226 ifp->if_capabilities |= IFCAP_RSS; 2227 ifp->if_capenable = ifp->if_capabilities; 2228 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2229 2230 /* 2231 * Tell the upper layer(s) we support long frames. 2232 */ 2233 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2234 2235 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2236 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2237 struct emx_txdata *tdata = &sc->tx_data[i]; 2238 2239 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2240 ifsq_set_priv(ifsq, tdata); 2241 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2242 tdata->ifsq = ifsq; 2243 2244 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog, 0); 2245 } 2246 2247 /* 2248 * Specify the media types supported by this sc and register 2249 * callbacks to update media and link information 2250 */ 2251 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2252 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2253 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2254 0, NULL); 2255 } else { 2256 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2257 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2258 0, NULL); 2259 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2260 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2261 0, NULL); 2262 if (sc->hw.phy.type != e1000_phy_ife) { 2263 ifmedia_add(&sc->media, 2264 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2265 } 2266 } 2267 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2268 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl); 2269 } 2270 2271 /* 2272 * Workaround for SmartSpeed on 82541 and 82547 controllers 2273 */ 2274 static void 2275 emx_smartspeed(struct emx_softc *sc) 2276 { 2277 uint16_t phy_tmp; 2278 2279 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2280 sc->hw.mac.autoneg == 0 || 2281 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2282 return; 2283 2284 if (sc->smartspeed == 0) { 2285 /* 2286 * If Master/Slave config fault is asserted twice, 2287 * we assume back-to-back 2288 */ 2289 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2290 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2291 return; 2292 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2293 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2294 e1000_read_phy_reg(&sc->hw, 2295 PHY_1000T_CTRL, &phy_tmp); 2296 if (phy_tmp & CR_1000T_MS_ENABLE) { 2297 phy_tmp &= ~CR_1000T_MS_ENABLE; 2298 e1000_write_phy_reg(&sc->hw, 2299 PHY_1000T_CTRL, phy_tmp); 2300 sc->smartspeed++; 2301 if (sc->hw.mac.autoneg && 2302 !e1000_phy_setup_autoneg(&sc->hw) && 2303 !e1000_read_phy_reg(&sc->hw, 2304 PHY_CONTROL, &phy_tmp)) { 2305 phy_tmp |= MII_CR_AUTO_NEG_EN | 2306 MII_CR_RESTART_AUTO_NEG; 2307 e1000_write_phy_reg(&sc->hw, 2308 PHY_CONTROL, phy_tmp); 2309 } 2310 } 2311 } 2312 return; 2313 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2314 /* If still no link, perhaps using 2/3 pair cable */ 2315 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2316 phy_tmp |= CR_1000T_MS_ENABLE; 2317 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2318 if (sc->hw.mac.autoneg && 2319 !e1000_phy_setup_autoneg(&sc->hw) && 2320 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2321 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2322 
e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2323 } 2324 } 2325 2326 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2327 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2328 sc->smartspeed = 0; 2329 } 2330 2331 static int 2332 emx_create_tx_ring(struct emx_txdata *tdata) 2333 { 2334 device_t dev = tdata->sc->dev; 2335 struct emx_txbuf *tx_buffer; 2336 int error, i, tsize, ntxd; 2337 2338 /* 2339 * Validate number of transmit descriptors. It must not exceed 2340 * hardware maximum, and must be a multiple of EMX_DBA_ALIGN. 2341 */ 2342 ntxd = device_getenv_int(dev, "txd", emx_txd); 2343 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2344 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2345 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2346 EMX_DEFAULT_TXD, ntxd); 2347 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2348 } else { 2349 tdata->num_tx_desc = ntxd; 2350 } 2351 2352 /* 2353 * Allocate Transmit Descriptor ring 2354 */ 2355 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2356 EMX_DBA_ALIGN); 2357 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2358 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2359 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2360 &tdata->tx_desc_paddr); 2361 if (tdata->tx_desc_base == NULL) { 2362 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2363 return ENOMEM; 2364 } 2365 2366 tsize = __VM_CACHELINE_ALIGN( 2367 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2368 tdata->tx_buf = kmalloc(tsize, M_DEVBUF, 2369 M_WAITOK | M_ZERO | M_CACHEALIGN); 2370 2371 /* 2372 * Create DMA tags for tx buffers 2373 */ 2374 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2375 1, 0, /* alignment, bounds */ 2376 BUS_SPACE_MAXADDR, /* lowaddr */ 2377 BUS_SPACE_MAXADDR, /* highaddr */ 2378 NULL, NULL, /* filter, filterarg */ 2379 EMX_TSO_SIZE, /* maxsize */ 2380 EMX_MAX_SCATTER, /* nsegments */ 2381 EMX_MAX_SEGSIZE, /* maxsegsize */ 2382 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2383 BUS_DMA_ONEBPAGE, /* flags */ 2384 &tdata->txtag); 2385 if (error) { 2386 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2387 kfree(tdata->tx_buf, M_DEVBUF); 2388 tdata->tx_buf = NULL; 2389 return error; 2390 } 2391 2392 /* 2393 * Create DMA maps for tx buffers 2394 */ 2395 for (i = 0; i < tdata->num_tx_desc; i++) { 2396 tx_buffer = &tdata->tx_buf[i]; 2397 2398 error = bus_dmamap_create(tdata->txtag, 2399 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2400 &tx_buffer->map); 2401 if (error) { 2402 device_printf(dev, "Unable to create TX DMA map\n"); 2403 emx_destroy_tx_ring(tdata, i); 2404 return error; 2405 } 2406 } 2407 2408 /* 2409 * Setup TX parameters 2410 */ 2411 tdata->spare_tx_desc = EMX_TX_SPARE; 2412 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2413 2414 /* 2415 * Keep following relationship between spare_tx_desc, oact_tx_desc 2416 * and tx_intr_nsegs: 2417 * (spare_tx_desc + EMX_TX_RESERVED) <= 2418 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2419 */ 2420 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2421 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2422 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2423 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2424 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2425 2426 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2427 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2428 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2429 2430 /* 2431 * Pull up an extra 4 bytes into the first data segment for TSO, see: 2432 * 82571/82572 specification
update errata #7 2433 * 2434 * Same applies to I217 (and maybe I218 and I219). 2435 * 2436 * NOTE: 2437 * 4 bytes instead of the 2 bytes mentioned in the errata 2438 * are pulled, mainly to keep the rest of the data properly aligned. 2439 */ 2440 if (tdata->sc->hw.mac.type == e1000_82571 || 2441 tdata->sc->hw.mac.type == e1000_82572 || 2442 tdata->sc->hw.mac.type == e1000_pch_lpt || 2443 tdata->sc->hw.mac.type == e1000_pch_spt || 2444 tdata->sc->hw.mac.type == e1000_pch_cnp) 2445 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2446 2447 return (0); 2448 } 2449 2450 static void 2451 emx_init_tx_ring(struct emx_txdata *tdata) 2452 { 2453 /* Clear the old ring contents */ 2454 bzero(tdata->tx_desc_base, 2455 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2456 2457 /* Reset state */ 2458 tdata->next_avail_tx_desc = 0; 2459 tdata->next_tx_to_clean = 0; 2460 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2461 tdata->tx_nmbuf = 0; 2462 tdata->tx_running = 0; 2463 2464 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2465 if (tdata->sc->tx_ring_inuse > 1) { 2466 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2467 if (bootverbose) { 2468 if_printf(&tdata->sc->arpcom.ac_if, 2469 "TX %d force ctx setup\n", tdata->idx); 2470 } 2471 } 2472 } 2473 2474 static void 2475 emx_init_tx_unit(struct emx_softc *sc) 2476 { 2477 uint32_t tctl, tarc, tipg = 0, txdctl; 2478 int i; 2479 2480 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2481 struct emx_txdata *tdata = &sc->tx_data[i]; 2482 uint64_t bus_addr; 2483 2484 /* Setup the Base and Length of the Tx Descriptor Ring */ 2485 bus_addr = tdata->tx_desc_paddr; 2486 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2487 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2488 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2489 (uint32_t)(bus_addr >> 32)); 2490 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2491 (uint32_t)bus_addr); 2492 /* Setup the HW Tx Head and Tail descriptor pointers */ 2493 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2494 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2495 2496 txdctl = 0x1f; /* PTHRESH */ 2497 txdctl |= 1 << 8; /* HTHRESH */ 2498 txdctl |= 1 << 16; /* WTHRESH */ 2499 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ 2500 txdctl |= E1000_TXDCTL_GRAN; 2501 txdctl |= 1 << 25; /* LWTHRESH */ 2502 2503 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(i), txdctl); 2504 } 2505 2506 /* Set the default values for the Tx Inter Packet Gap timer */ 2507 switch (sc->hw.mac.type) { 2508 case e1000_80003es2lan: 2509 tipg = DEFAULT_82543_TIPG_IPGR1; 2510 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2511 E1000_TIPG_IPGR2_SHIFT; 2512 break; 2513 2514 default: 2515 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2516 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2517 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2518 else 2519 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2520 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2521 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2522 break; 2523 } 2524 2525 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2526 2527 /* NOTE: 0 is not allowed for TIDV */ 2528 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2529 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2530 2531 /* 2532 * Errata workaround (obtained from Linux). This is necessary 2533 * to make multiple TX queues work on 82574. 2534 * XXX can't find it in any published errata though.
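* The workaround below simply mirrors TXDCTL(0) into TXDCTL(1), so both queues run with identical threshold settings.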
2535 */ 2536 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0)); 2537 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl); 2538 2539 if (sc->hw.mac.type == e1000_82571 || 2540 sc->hw.mac.type == e1000_82572) { 2541 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2542 tarc |= EMX_TARC_SPEED_MODE; 2543 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2544 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2545 /* errata: program both queues to unweighted RR */ 2546 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2547 tarc |= 1; 2548 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2549 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2550 tarc |= 1; 2551 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2552 } else if (sc->hw.mac.type == e1000_82574) { 2553 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2554 tarc |= EMX_TARC_ERRATA; 2555 if (sc->tx_ring_inuse > 1) { 2556 tarc |= (EMX_TARC_COMPENSATION_MODE | EMX_TARC_MQ_FIX); 2557 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2558 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2559 } else { 2560 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2561 } 2562 } 2563 2564 /* Program the Transmit Control Register */ 2565 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2566 tctl &= ~E1000_TCTL_CT; 2567 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2568 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2569 tctl |= E1000_TCTL_MULR; 2570 2571 /* This write will effectively turn on the transmit unit. */ 2572 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2573 2574 if (sc->hw.mac.type == e1000_82571 || 2575 sc->hw.mac.type == e1000_82572 || 2576 sc->hw.mac.type == e1000_80003es2lan) { 2577 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2578 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2579 tarc &= ~(1 << 28); 2580 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2581 } else if (sc->hw.mac.type >= e1000_pch_spt) { 2582 uint32_t reg; 2583 2584 reg = E1000_READ_REG(&sc->hw, E1000_IOSFPC); 2585 reg |= E1000_RCTL_RDMTS_HEX; 2586 E1000_WRITE_REG(&sc->hw, E1000_IOSFPC, reg); 2587 reg = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2588 reg |= E1000_TARC0_CB_MULTIQ_3_REQ; 2589 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), reg); 2590 } 2591 2592 if (sc->tx_ring_inuse > 1) { 2593 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2594 tarc &= ~EMX_TARC_COUNT_MASK; 2595 tarc |= 1; 2596 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2597 2598 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2599 tarc &= ~EMX_TARC_COUNT_MASK; 2600 tarc |= 1; 2601 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2602 } 2603 } 2604 2605 static void 2606 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc) 2607 { 2608 struct emx_txbuf *tx_buffer; 2609 int i; 2610 2611 /* Free Transmit Descriptor ring */ 2612 if (tdata->tx_desc_base) { 2613 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap); 2614 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base, 2615 tdata->tx_desc_dmap); 2616 bus_dma_tag_destroy(tdata->tx_desc_dtag); 2617 2618 tdata->tx_desc_base = NULL; 2619 } 2620 2621 if (tdata->tx_buf == NULL) 2622 return; 2623 2624 for (i = 0; i < ndesc; i++) { 2625 tx_buffer = &tdata->tx_buf[i]; 2626 2627 KKASSERT(tx_buffer->m_head == NULL); 2628 bus_dmamap_destroy(tdata->txtag, tx_buffer->map); 2629 } 2630 bus_dma_tag_destroy(tdata->txtag); 2631 2632 kfree(tdata->tx_buf, M_DEVBUF); 2633 tdata->tx_buf = NULL; 2634 } 2635 2636 /* 2637 * The offload context needs to be set when we transfer the first 2638 * packet of a particular protocol (TCP/UDP). 
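* The context descriptor consumes one slot in the regular TX ring and tells the MAC where the IP and TCP/UDP checksum fields live within the frame.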
This routine has been 2639 * enhanced to deal with inserted VLAN headers. 2640 * 2641 * If the new packet's ether header length, ip header length and 2642 * csum offloading type are the same as the previous packet's, we should 2643 * avoid allocating a new csum context descriptor; mainly to take 2644 * advantage of the pipeline effect of the TX data read request. 2645 * 2646 * This function returns the number of TX descriptors allocated for 2647 * the csum context. 2648 */ 2649 static int 2650 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp, 2651 uint32_t *txd_upper, uint32_t *txd_lower) 2652 { 2653 struct e1000_context_desc *TXD; 2654 int curr_txd, ehdrlen, csum_flags; 2655 uint32_t cmd, hdr_len, ip_hlen; 2656 2657 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2658 ip_hlen = mp->m_pkthdr.csum_iphlen; 2659 ehdrlen = mp->m_pkthdr.csum_lhlen; 2660 2661 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 2662 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen && 2663 tdata->csum_flags == csum_flags) { 2664 /* 2665 * Same csum offload context as the previous packets; 2666 * just return. 2667 */ 2668 *txd_upper = tdata->csum_txd_upper; 2669 *txd_lower = tdata->csum_txd_lower; 2670 return 0; 2671 } 2672 2673 /* 2674 * Setup a new csum offload context. 2675 */ 2676 2677 curr_txd = tdata->next_avail_tx_desc; 2678 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 2679 2680 cmd = 0; 2681 2682 /* Setup of IP header checksum. */ 2683 if (csum_flags & CSUM_IP) { 2684 /* 2685 * Start offset for header checksum calculation. 2686 * End offset for header checksum calculation. 2687 * Offset of place to put the checksum. 2688 */ 2689 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2690 TXD->lower_setup.ip_fields.ipcse = 2691 htole16(ehdrlen + ip_hlen - 1); 2692 TXD->lower_setup.ip_fields.ipcso = 2693 ehdrlen + offsetof(struct ip, ip_sum); 2694 cmd |= E1000_TXD_CMD_IP; 2695 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2696 } 2697 hdr_len = ehdrlen + ip_hlen; 2698 2699 if (csum_flags & CSUM_TCP) { 2700 /* 2701 * Start offset for payload checksum calculation. 2702 * End offset for payload checksum calculation. 2703 * Offset of place to put the checksum. 2704 */ 2705 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2706 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2707 TXD->upper_setup.tcp_fields.tucso = 2708 hdr_len + offsetof(struct tcphdr, th_sum); 2709 cmd |= E1000_TXD_CMD_TCP; 2710 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2711 } else if (csum_flags & CSUM_UDP) { 2712 /* 2713 * Start offset for header checksum calculation. 2714 * End offset for header checksum calculation. 2715 * Offset of place to put the checksum.
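* A tucse of 0 tells the hardware to checksum through to the end of the packet.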
2716 */ 2717 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2718 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2719 TXD->upper_setup.tcp_fields.tucso = 2720 hdr_len + offsetof(struct udphdr, uh_sum); 2721 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2722 } 2723 2724 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2725 E1000_TXD_DTYP_D; /* Data descr */ 2726 2727 /* Save the information for this csum offloading context */ 2728 tdata->csum_lhlen = ehdrlen; 2729 tdata->csum_iphlen = ip_hlen; 2730 tdata->csum_flags = csum_flags; 2731 tdata->csum_txd_upper = *txd_upper; 2732 tdata->csum_txd_lower = *txd_lower; 2733 2734 TXD->tcp_seg_setup.data = htole32(0); 2735 TXD->cmd_and_length = 2736 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2737 2738 if (++curr_txd == tdata->num_tx_desc) 2739 curr_txd = 0; 2740 2741 KKASSERT(tdata->num_tx_desc_avail > 0); 2742 tdata->num_tx_desc_avail--; 2743 2744 tdata->next_avail_tx_desc = curr_txd; 2745 return 1; 2746 } 2747 2748 static void 2749 emx_txeof(struct emx_txdata *tdata) 2750 { 2751 struct emx_txbuf *tx_buffer; 2752 int first, num_avail; 2753 2754 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2755 return; 2756 2757 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2758 return; 2759 2760 num_avail = tdata->num_tx_desc_avail; 2761 first = tdata->next_tx_to_clean; 2762 2763 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2764 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2765 struct e1000_tx_desc *tx_desc; 2766 2767 tx_desc = &tdata->tx_desc_base[dd_idx]; 2768 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2769 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2770 2771 if (++dd_idx == tdata->num_tx_desc) 2772 dd_idx = 0; 2773 2774 while (first != dd_idx) { 2775 logif(pkt_txclean); 2776 2777 KKASSERT(num_avail < tdata->num_tx_desc); 2778 num_avail++; 2779 2780 tx_buffer = &tdata->tx_buf[first]; 2781 if (tx_buffer->m_head) 2782 emx_free_txbuf(tdata, tx_buffer); 2783 2784 if (++first == tdata->num_tx_desc) 2785 first = 0; 2786 } 2787 } else { 2788 break; 2789 } 2790 } 2791 tdata->next_tx_to_clean = first; 2792 tdata->num_tx_desc_avail = num_avail; 2793 2794 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2795 tdata->tx_dd_head = 0; 2796 tdata->tx_dd_tail = 0; 2797 } 2798 2799 if (!EMX_IS_OACTIVE(tdata)) { 2800 ifsq_clr_oactive(tdata->ifsq); 2801 2802 /* All clean, turn off the timer */ 2803 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2804 ifsq_watchdog_set_count(&tdata->tx_watchdog, 0); 2805 } 2806 tdata->tx_running = EMX_TX_RUNNING; 2807 } 2808 2809 static void 2810 emx_tx_collect(struct emx_txdata *tdata, boolean_t gc) 2811 { 2812 struct emx_txbuf *tx_buffer; 2813 int tdh, first, num_avail, dd_idx = -1; 2814 2815 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2816 return; 2817 2818 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx)); 2819 if (tdh == tdata->next_tx_to_clean) { 2820 if (gc && tdata->tx_nmbuf > 0) 2821 tdata->tx_running = EMX_TX_RUNNING; 2822 return; 2823 } 2824 if (gc) 2825 tdata->tx_gc++; 2826 2827 if (tdata->tx_dd_head != tdata->tx_dd_tail) 2828 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2829 2830 num_avail = tdata->num_tx_desc_avail; 2831 first = tdata->next_tx_to_clean; 2832 2833 while (first != tdh) { 2834 logif(pkt_txclean); 2835 2836 KKASSERT(num_avail < tdata->num_tx_desc); 2837 num_avail++; 2838 2839 tx_buffer = &tdata->tx_buf[first]; 2840 if (tx_buffer->m_head) 2841 emx_free_txbuf(tdata, tx_buffer); 2842 2843 if (first == dd_idx) { 2844 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2845 if (tdata->tx_dd_head 
== tdata->tx_dd_tail) { 2846 tdata->tx_dd_head = 0; 2847 tdata->tx_dd_tail = 0; 2848 dd_idx = -1; 2849 } else { 2850 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2851 } 2852 } 2853 2854 if (++first == tdata->num_tx_desc) 2855 first = 0; 2856 } 2857 tdata->next_tx_to_clean = first; 2858 tdata->num_tx_desc_avail = num_avail; 2859 2860 if (!EMX_IS_OACTIVE(tdata)) { 2861 ifsq_clr_oactive(tdata->ifsq); 2862 2863 /* All clean, turn off the timer */ 2864 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2865 ifsq_watchdog_set_count(&tdata->tx_watchdog, 0); 2866 } 2867 if (!gc || tdata->tx_nmbuf > 0) 2868 tdata->tx_running = EMX_TX_RUNNING; 2869 } 2870 2871 /* 2872 * When link is lost there is sometimes work still in the TX ring, 2873 * which would result in a watchdog; rather than allow that, do an 2874 * attempted cleanup and then reinit here. Note that this has been 2875 * seen mostly with fiber adapters. 2876 */ 2877 static void 2878 emx_tx_purge(struct emx_softc *sc) 2879 { 2880 int i; 2881 2882 if (sc->link_active) 2883 return; 2884 2885 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2886 struct emx_txdata *tdata = &sc->tx_data[i]; 2887 2888 if (tdata->tx_watchdog.wd_timer) { 2889 emx_tx_collect(tdata, FALSE); 2890 if (tdata->tx_watchdog.wd_timer) { 2891 if_printf(&sc->arpcom.ac_if, 2892 "Link lost, TX pending, reinit\n"); 2893 emx_init(sc); 2894 return; 2895 } 2896 } 2897 } 2898 } 2899 2900 static int 2901 emx_newbuf(struct emx_rxdata *rdata, int i, int init) 2902 { 2903 struct mbuf *m; 2904 bus_dma_segment_t seg; 2905 bus_dmamap_t map; 2906 struct emx_rxbuf *rx_buffer; 2907 int error, nseg; 2908 2909 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2910 if (m == NULL) { 2911 if (init) { 2912 if_printf(&rdata->sc->arpcom.ac_if, 2913 "Unable to allocate RX mbuf\n"); 2914 } 2915 return (ENOBUFS); 2916 } 2917 m->m_len = m->m_pkthdr.len = MCLBYTES; 2918 2919 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 2920 m_adj(m, ETHER_ALIGN); 2921 2922 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2923 rdata->rx_sparemap, m, 2924 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2925 if (error) { 2926 m_freem(m); 2927 if (init) { 2928 if_printf(&rdata->sc->arpcom.ac_if, 2929 "Unable to load RX mbuf\n"); 2930 } 2931 return (error); 2932 } 2933 2934 rx_buffer = &rdata->rx_buf[i]; 2935 if (rx_buffer->m_head != NULL) 2936 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2937 2938 map = rx_buffer->map; 2939 rx_buffer->map = rdata->rx_sparemap; 2940 rdata->rx_sparemap = map; 2941 2942 rx_buffer->m_head = m; 2943 rx_buffer->paddr = seg.ds_addr; 2944 2945 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2946 return (0); 2947 } 2948 2949 static int 2950 emx_create_rx_ring(struct emx_rxdata *rdata) 2951 { 2952 device_t dev = rdata->sc->dev; 2953 struct emx_rxbuf *rx_buffer; 2954 int i, error, rsize, nrxd; 2955 2956 /* 2957 * Validate number of receive descriptors. It must not exceed 2958 * hardware maximum, and must be a multiple of EMX_DBA_ALIGN.
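* An out-of-range tunable is not treated as fatal; the code below simply falls back to EMX_DEFAULT_RXD.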
2959 */ 2960 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2961 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2962 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2963 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2964 EMX_DEFAULT_RXD, nrxd); 2965 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2966 } else { 2967 rdata->num_rx_desc = nrxd; 2968 } 2969 2970 /* 2971 * Allocate Receive Descriptor ring 2972 */ 2973 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2974 EMX_DBA_ALIGN); 2975 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2976 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2977 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2978 &rdata->rx_desc_paddr); 2979 if (rdata->rx_desc == NULL) { 2980 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2981 return ENOMEM; 2982 } 2983 2984 rsize = __VM_CACHELINE_ALIGN( 2985 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2986 rdata->rx_buf = kmalloc(rsize, M_DEVBUF, 2987 M_WAITOK | M_ZERO | M_CACHEALIGN); 2988 2989 /* 2990 * Create DMA tag for rx buffers 2991 */ 2992 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2993 1, 0, /* alignment, bounds */ 2994 BUS_SPACE_MAXADDR, /* lowaddr */ 2995 BUS_SPACE_MAXADDR, /* highaddr */ 2996 NULL, NULL, /* filter, filterarg */ 2997 MCLBYTES, /* maxsize */ 2998 1, /* nsegments */ 2999 MCLBYTES, /* maxsegsize */ 3000 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 3001 &rdata->rxtag); 3002 if (error) { 3003 device_printf(dev, "Unable to allocate RX DMA tag\n"); 3004 kfree(rdata->rx_buf, M_DEVBUF); 3005 rdata->rx_buf = NULL; 3006 return error; 3007 } 3008 3009 /* 3010 * Create spare DMA map for rx buffers 3011 */ 3012 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 3013 &rdata->rx_sparemap); 3014 if (error) { 3015 device_printf(dev, "Unable to create spare RX DMA map\n"); 3016 bus_dma_tag_destroy(rdata->rxtag); 3017 kfree(rdata->rx_buf, M_DEVBUF); 3018 rdata->rx_buf = NULL; 3019 return error; 3020 } 3021 3022 /* 3023 * Create DMA maps for rx buffers 3024 */ 3025 for (i = 0; i < rdata->num_rx_desc; i++) { 3026 rx_buffer = &rdata->rx_buf[i]; 3027 3028 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 3029 &rx_buffer->map); 3030 if (error) { 3031 device_printf(dev, "Unable to create RX DMA map\n"); 3032 emx_destroy_rx_ring(rdata, i); 3033 return error; 3034 } 3035 } 3036 return (0); 3037 } 3038 3039 static void 3040 emx_free_rx_ring(struct emx_rxdata *rdata) 3041 { 3042 int i; 3043 3044 for (i = 0; i < rdata->num_rx_desc; i++) { 3045 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 3046 3047 if (rx_buffer->m_head != NULL) { 3048 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 3049 m_freem(rx_buffer->m_head); 3050 rx_buffer->m_head = NULL; 3051 } 3052 } 3053 3054 if (rdata->fmp != NULL) 3055 m_freem(rdata->fmp); 3056 rdata->fmp = NULL; 3057 rdata->lmp = NULL; 3058 } 3059 3060 static void 3061 emx_free_tx_ring(struct emx_txdata *tdata) 3062 { 3063 int i; 3064 3065 for (i = 0; i < tdata->num_tx_desc; i++) { 3066 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i]; 3067 3068 if (tx_buffer->m_head != NULL) 3069 emx_free_txbuf(tdata, tx_buffer); 3070 } 3071 3072 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX; 3073 3074 tdata->csum_flags = 0; 3075 tdata->csum_lhlen = 0; 3076 tdata->csum_iphlen = 0; 3077 tdata->csum_thlen = 0; 3078 tdata->csum_mss = 0; 3079 tdata->csum_pktlen = 0; 3080 3081 tdata->tx_dd_head = 0; 3082 tdata->tx_dd_tail = 0; 3083 tdata->tx_nsegs = 0; 3084 } 3085 3086 static int 3087 emx_init_rx_ring(struct emx_rxdata *rdata) 3088 { 3089 int i, 
error; 3090 3091 /* Reset descriptor ring */ 3092 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 3093 3094 /* Allocate new ones. */ 3095 for (i = 0; i < rdata->num_rx_desc; i++) { 3096 error = emx_newbuf(rdata, i, 1); 3097 if (error) 3098 return (error); 3099 } 3100 3101 /* Setup our descriptor pointers */ 3102 rdata->next_rx_desc_to_check = 0; 3103 3104 return (0); 3105 } 3106 3107 static void 3108 emx_init_rx_unit(struct emx_softc *sc) 3109 { 3110 struct ifnet *ifp = &sc->arpcom.ac_if; 3111 uint64_t bus_addr; 3112 uint32_t rctl, itr, rfctl, rxcsum; 3113 int i; 3114 3115 /* 3116 * Make sure receives are disabled while setting 3117 * up the descriptor ring 3118 */ 3119 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 3120 /* Do not disable if ever enabled on this hardware */ 3121 if (sc->hw.mac.type != e1000_82574) 3122 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 3123 3124 /* 3125 * Set the interrupt throttling rate. Value is calculated 3126 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 3127 */ 3128 if (sc->int_throttle_ceil) 3129 itr = 1000000000 / 256 / sc->int_throttle_ceil; 3130 else 3131 itr = 0; 3132 emx_set_itr(sc, itr); 3133 3134 /* Use extended RX descriptor */ 3135 rfctl = E1000_READ_REG(&sc->hw, E1000_RFCTL); 3136 rfctl |= E1000_RFCTL_EXTEN; 3137 /* Disable accelerated acknowledgement */ 3138 if (sc->hw.mac.type == e1000_82574) 3139 rfctl |= E1000_RFCTL_ACK_DIS; 3140 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 3141 3142 /* 3143 * Receive Checksum Offload for TCP and UDP 3144 * 3145 * Checksum offloading is also enabled if multiple receive 3146 * queues are to be supported, since we need it to figure out 3147 * the packet type. 3148 */ 3149 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 3150 if ((ifp->if_capenable & IFCAP_RXCSUM) || 3151 sc->rx_ring_cnt > 1) { 3152 /* 3153 * NOTE: 3154 * PCSD must be enabled to enable multiple 3155 * receive queues. 3156 */ 3157 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 3158 E1000_RXCSUM_PCSD; 3159 } else { 3160 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 3161 E1000_RXCSUM_PCSD); 3162 } 3163 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 3164 3165 /* 3166 * Configure multiple receive queues (RSS) 3167 */ 3168 if (sc->rx_ring_cnt > 1) { 3169 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE]; 3170 int r, j; 3171 3172 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING, 3173 ("invalid number of RX ring (%d)", sc->rx_ring_cnt)); 3174 3175 /* 3176 * NOTE: 3177 * When we reach here, RSS has already been disabled 3178 * in emx_stop(), so we can safely configure the RSS key 3179 * and redirect table. 3180 */ 3181 3182 /* 3183 * Configure RSS key 3184 */ 3185 toeplitz_get_key(key, sizeof(key)); 3186 for (i = 0; i < EMX_NRSSRK; ++i) { 3187 uint32_t rssrk; 3188 3189 rssrk = EMX_RSSRK_VAL(key, i); 3190 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 3191 3192 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk); 3193 } 3194 3195 /* 3196 * Configure RSS redirect table. 3197 */ 3198 if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table, 3199 EMX_RDRTABLE_SIZE); 3200 3201 r = 0; 3202 for (j = 0; j < EMX_NRETA; ++j) { 3203 uint32_t reta = 0; 3204 3205 for (i = 0; i < EMX_RETA_SIZE; ++i) { 3206 uint32_t q; 3207 3208 q = sc->rdr_table[r] << EMX_RETA_RINGIDX_SHIFT; 3209 reta |= q << (8 * i); 3210 ++r; 3211 } 3212 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 3213 E1000_WRITE_REG(&sc->hw, E1000_RETA(j), reta); 3214 } 3215 3216 /* 3217 * Enable multiple receive queues. 3218 * Enable IPv4 RSS standard hash functions. 3219 * Disable RSS interrupt.
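* NOTE: Only the IPv4 and IPv4/TCP hash fields are enabled below; UDP hashing is left off, so UDP flows are dispatched by the plain IPv4 2-tuple hash.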
3220 */ 3221 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 3222 E1000_MRQC_ENABLE_RSS_2Q | 3223 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3224 E1000_MRQC_RSS_FIELD_IPV4); 3225 } 3226 3227 /* 3228 * XXX TEMPORARY WORKAROUND: on some systems with 82573, 3229 * long latencies are observed, e.g. on the Lenovo X60. This 3230 * change eliminates the problem, but since having positive 3231 * values in RDTR is a known source of problems on other 3232 * platforms, another solution is being sought. 3233 */ 3234 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 3235 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 3236 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 3237 } 3238 3239 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3240 struct emx_rxdata *rdata = &sc->rx_data[i]; 3241 3242 /* 3243 * Setup the Base and Length of the Rx Descriptor Ring 3244 */ 3245 bus_addr = rdata->rx_desc_paddr; 3246 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 3247 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 3248 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 3249 (uint32_t)(bus_addr >> 32)); 3250 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 3251 (uint32_t)bus_addr); 3252 3253 /* 3254 * Setup the HW Rx Head and Tail Descriptor Pointers 3255 */ 3256 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 3257 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 3258 sc->rx_data[i].num_rx_desc - 1); 3259 } 3260 3261 /* Set PTHRESH for improved jumbo performance */ 3262 if (ifp->if_mtu > ETHERMTU && sc->hw.mac.type == e1000_82574) { 3263 uint32_t rxdctl; 3264 3265 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3266 rxdctl = E1000_READ_REG(&sc->hw, E1000_RXDCTL(i)); 3267 rxdctl |= 0x20; /* PTHRESH */ 3268 rxdctl |= 4 << 8; /* HTHRESH */ 3269 rxdctl |= 4 << 16; /* WTHRESH */ 3270 rxdctl |= 1 << 24; /* Switch to granularity */ 3271 E1000_WRITE_REG(&sc->hw, E1000_RXDCTL(i), rxdctl); 3272 } 3273 } 3274 3275 if (sc->hw.mac.type >= e1000_pch2lan) { 3276 if (ifp->if_mtu > ETHERMTU) 3277 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE); 3278 else 3279 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE); 3280 } 3281 3282 /* Setup the Receive Control Register */ 3283 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3284 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3285 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 3286 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3287 3288 /* Make sure VLAN Filters are off */ 3289 rctl &= ~E1000_RCTL_VFE; 3290 3291 /* Don't store bad packets */ 3292 rctl &= ~E1000_RCTL_SBP; 3293 3294 /* MCLBYTES */ 3295 rctl |= E1000_RCTL_SZ_2048; 3296 3297 if (ifp->if_mtu > ETHERMTU) 3298 rctl |= E1000_RCTL_LPE; 3299 else 3300 rctl &= ~E1000_RCTL_LPE; 3301 3302 /* Enable Receives */ 3303 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 3304 } 3305 3306 static void 3307 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc) 3308 { 3309 struct emx_rxbuf *rx_buffer; 3310 int i; 3311 3312 /* Free Receive Descriptor ring */ 3313 if (rdata->rx_desc) { 3314 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 3315 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 3316 rdata->rx_desc_dmap); 3317 bus_dma_tag_destroy(rdata->rx_desc_dtag); 3318 3319 rdata->rx_desc = NULL; 3320 } 3321 3322 if (rdata->rx_buf == NULL) 3323 return; 3324 3325 for (i = 0; i < ndesc; i++) { 3326 rx_buffer = &rdata->rx_buf[i]; 3327 3328 KKASSERT(rx_buffer->m_head == NULL); 3329 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 3330 } 3331 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 3332 bus_dma_tag_destroy(rdata->rxtag); 3333 3334 kfree(rdata->rx_buf, M_DEVBUF); 3335 rdata->rx_buf =
NULL; 3336 } 3337 3338 static void 3339 emx_rxeof(struct emx_rxdata *rdata, int count) 3340 { 3341 struct ifnet *ifp = &rdata->sc->arpcom.ac_if; 3342 uint32_t staterr; 3343 emx_rxdesc_t *current_desc; 3344 struct mbuf *mp; 3345 int i, cpuid = mycpuid; 3346 3347 i = rdata->next_rx_desc_to_check; 3348 current_desc = &rdata->rx_desc[i]; 3349 staterr = le32toh(current_desc->rxd_staterr); 3350 3351 if (!(staterr & E1000_RXD_STAT_DD)) 3352 return; 3353 3354 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 3355 struct pktinfo *pi = NULL, pi0; 3356 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 3357 struct mbuf *m = NULL; 3358 int eop, len; 3359 3360 logif(pkt_receive); 3361 3362 mp = rx_buf->m_head; 3363 3364 /* 3365 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3366 * needs to access the last received byte in the mbuf. 3367 */ 3368 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 3369 BUS_DMASYNC_POSTREAD); 3370 3371 len = le16toh(current_desc->rxd_length); 3372 if (staterr & E1000_RXD_STAT_EOP) { 3373 count--; 3374 eop = 1; 3375 } else { 3376 eop = 0; 3377 } 3378 3379 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3380 uint16_t vlan = 0; 3381 uint32_t mrq, rss_hash; 3382 3383 /* 3384 * Save some necessary information 3385 * before emx_newbuf() destroys it. 3386 */ 3387 if ((staterr & E1000_RXD_STAT_VP) && eop) 3388 vlan = le16toh(current_desc->rxd_vlan); 3389 3390 mrq = le32toh(current_desc->rxd_mrq); 3391 rss_hash = le32toh(current_desc->rxd_rss); 3392 3393 EMX_RSS_DPRINTF(rdata->sc, 10, 3394 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 3395 rdata->idx, mrq, rss_hash); 3396 3397 if (emx_newbuf(rdata, i, 0) != 0) { 3398 IFNET_STAT_INC(ifp, iqdrops, 1); 3399 goto discard; 3400 } 3401 3402 /* Assign correct length to the current fragment */ 3403 mp->m_len = len; 3404 3405 if (rdata->fmp == NULL) { 3406 mp->m_pkthdr.len = len; 3407 rdata->fmp = mp; /* Store the first mbuf */ 3408 rdata->lmp = mp; 3409 } else { 3410 /* 3411 * Chain mbufs together 3412 */ 3413 rdata->lmp->m_next = mp; 3414 rdata->lmp = rdata->lmp->m_next; 3415 rdata->fmp->m_pkthdr.len += len; 3416 } 3417 3418 if (eop) { 3419 rdata->fmp->m_pkthdr.rcvif = ifp; 3420 IFNET_STAT_INC(ifp, ipackets, 1); 3421 3422 if (ifp->if_capenable & IFCAP_RXCSUM) 3423 emx_rxcsum(staterr, rdata->fmp); 3424 3425 if (staterr & E1000_RXD_STAT_VP) { 3426 rdata->fmp->m_pkthdr.ether_vlantag = 3427 vlan; 3428 rdata->fmp->m_flags |= M_VLANTAG; 3429 } 3430 m = rdata->fmp; 3431 rdata->fmp = NULL; 3432 rdata->lmp = NULL; 3433 3434 if (ifp->if_capenable & IFCAP_RSS) { 3435 pi = emx_rssinfo(m, &pi0, mrq, 3436 rss_hash, staterr); 3437 } 3438 #ifdef EMX_RSS_DEBUG 3439 rdata->rx_pkts++; 3440 #endif 3441 } 3442 } else { 3443 IFNET_STAT_INC(ifp, ierrors, 1); 3444 discard: 3445 emx_setup_rxdesc(current_desc, rx_buf); 3446 if (rdata->fmp != NULL) { 3447 m_freem(rdata->fmp); 3448 rdata->fmp = NULL; 3449 rdata->lmp = NULL; 3450 } 3451 m = NULL; 3452 } 3453 3454 if (m != NULL) 3455 ifp->if_input(ifp, m, pi, cpuid); 3456 3457 /* Advance our pointers to the next descriptor. */ 3458 if (++i == rdata->num_rx_desc) 3459 i = 0; 3460 3461 current_desc = &rdata->rx_desc[i]; 3462 staterr = le32toh(current_desc->rxd_staterr); 3463 } 3464 rdata->next_rx_desc_to_check = i; 3465 3466 /* Advance the E1000's Receive Queue "Tail Pointer".
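* RDT must point at the last descriptor software has refilled, i.e. one behind next_rx_desc_to_check, so i is backed up by one (with ring wrap) before the write.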
*/ 3467 if (--i < 0) 3468 i = rdata->num_rx_desc - 1; 3469 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3470 } 3471 3472 static void 3473 emx_enable_intr(struct emx_softc *sc) 3474 { 3475 uint32_t ims_mask = IMS_ENABLE_MASK; 3476 3477 lwkt_serialize_handler_enable(&sc->main_serialize); 3478 3479 #if 0 3480 if (sc->hw.mac.type == e1000_82574) { 3481 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3482 ims_mask |= EM_MSIX_MASK; 3483 } 3484 #endif 3485 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3486 } 3487 3488 static void 3489 emx_disable_intr(struct emx_softc *sc) 3490 { 3491 if (sc->hw.mac.type == e1000_82574) 3492 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3493 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3494 3495 lwkt_serialize_handler_disable(&sc->main_serialize); 3496 } 3497 3498 /* 3499 * Bit of a misnomer: what this really means is 3500 * to enable OS management of the system, i.e. 3501 * to disable special hardware management features. 3502 */ 3503 static void 3504 emx_get_mgmt(struct emx_softc *sc) 3505 { 3506 /* A shared code workaround */ 3507 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3508 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3509 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3510 3511 /* disable hardware interception of ARP */ 3512 manc &= ~(E1000_MANC_ARP_EN); 3513 3514 /* enable receiving management packets to the host */ 3515 manc |= E1000_MANC_EN_MNG2HOST; 3516 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3517 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3518 manc2h |= E1000_MNG2HOST_PORT_623; 3519 manc2h |= E1000_MNG2HOST_PORT_664; 3520 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3521 3522 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3523 } 3524 } 3525 3526 /* 3527 * Give control back to the hardware management 3528 * controller, if there is one. 3529 */ 3530 static void 3531 emx_rel_mgmt(struct emx_softc *sc) 3532 { 3533 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3534 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3535 3536 /* re-enable hardware interception of ARP */ 3537 manc |= E1000_MANC_ARP_EN; 3538 manc &= ~E1000_MANC_EN_MNG2HOST; 3539 3540 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3541 } 3542 } 3543 3544 /* 3545 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3546 * For ASF and Pass Through versions of f/w this means that 3547 * the driver is loaded. For AMT version (only with 82573) 3548 * of the f/w this means that the network i/f is open. 3549 */ 3550 static void 3551 emx_get_hw_control(struct emx_softc *sc) 3552 { 3553 /* Let firmware know the driver has taken over */ 3554 if (sc->hw.mac.type == e1000_82573) { 3555 uint32_t swsm; 3556 3557 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3558 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3559 swsm | E1000_SWSM_DRV_LOAD); 3560 } else { 3561 uint32_t ctrl_ext; 3562 3563 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3564 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3565 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3566 } 3567 sc->flags |= EMX_FLAG_HW_CTRL; 3568 } 3569 3570 /* 3571 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3572 * For ASF and Pass Through versions of f/w this means that the 3573 * driver is no longer loaded. For AMT version (only with 82573) 3574 * of the f/w this means that the network i/f is closed.
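* The EMX_FLAG_HW_CTRL check below makes this routine idempotent.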
3575 */ 3576 static void 3577 emx_rel_hw_control(struct emx_softc *sc) 3578 { 3579 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0) 3580 return; 3581 sc->flags &= ~EMX_FLAG_HW_CTRL; 3582 3583 /* Let firmware take over control of h/w */ 3584 if (sc->hw.mac.type == e1000_82573) { 3585 uint32_t swsm; 3586 3587 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3588 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3589 swsm & ~E1000_SWSM_DRV_LOAD); 3590 } else { 3591 uint32_t ctrl_ext; 3592 3593 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3594 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3595 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3596 } 3597 } 3598 3599 static int 3600 emx_is_valid_eaddr(const uint8_t *addr) 3601 { 3602 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3603 3604 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3605 return (FALSE); 3606 3607 return (TRUE); 3608 } 3609 3610 /* 3611 * Enable PCI Wake On LAN capability 3612 */ 3613 static void 3614 emx_enable_wol(device_t dev) 3615 { 3616 uint16_t cap, status; 3617 uint8_t id; 3618 3619 /* First find the capabilities pointer */ 3620 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3621 3622 /* Read the PM Capabilities */ 3623 id = pci_read_config(dev, cap, 1); 3624 if (id != PCIY_PMG) /* Something wrong */ 3625 return; 3626 3627 /* 3628 * OK, we have the power capabilities, 3629 * so now get the status register 3630 */ 3631 cap += PCIR_POWER_STATUS; 3632 status = pci_read_config(dev, cap, 2); 3633 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3634 pci_write_config(dev, cap, status, 2); 3635 } 3636 3637 static void 3638 emx_update_stats(struct emx_softc *sc) 3639 { 3640 struct ifnet *ifp = &sc->arpcom.ac_if; 3641 3642 if (sc->hw.phy.media_type == e1000_media_type_copper || 3643 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3644 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3645 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3646 } 3647 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3648 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3649 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3650 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3651 3652 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3653 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3654 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3655 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3656 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3657 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3658 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3659 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3660 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3661 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3662 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3663 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3664 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3665 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3666 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3667 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3668 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3669 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3670 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3671 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3672 3673 /* For the 64-bit byte counters the low dword must be read first.
*/ 3674 /* Both registers clear on the read of the high dword */ 3675 3676 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3677 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3678 3679 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3680 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3681 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3682 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3683 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3684 3685 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3686 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3687 3688 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3689 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3690 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3691 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3692 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3693 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3694 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3695 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3696 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3697 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3698 3699 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3700 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3701 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3702 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3703 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3704 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3705 3706 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3707 3708 /* Rx Errors */ 3709 IFNET_STAT_SET(ifp, ierrors, 3710 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3711 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3712 3713 /* Tx Errors */ 3714 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3715 } 3716 3717 static void 3718 emx_print_debug_info(struct emx_softc *sc) 3719 { 3720 device_t dev = sc->dev; 3721 uint8_t *hw_addr = sc->hw.hw_addr; 3722 int i; 3723 3724 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3725 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3726 E1000_READ_REG(&sc->hw, E1000_CTRL), 3727 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3728 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3729 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16), 3730 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff)); 3731 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3732 sc->hw.fc.high_water, sc->hw.fc.low_water); 3733 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3734 E1000_READ_REG(&sc->hw, E1000_TIDV), 3735 E1000_READ_REG(&sc->hw, E1000_TADV)); 3736 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3737 E1000_READ_REG(&sc->hw, E1000_RDTR), 3738 E1000_READ_REG(&sc->hw, E1000_RADV)); 3739 3740 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3741 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3742 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3743 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3744 } 3745 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3746 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3747 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3748 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3749 } 3750 3751 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3752 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3753 sc->tx_data[i].num_tx_desc_avail); 3754
device_printf(dev, "TX %d TSO segments = %lu\n", i, 3755 sc->tx_data[i].tso_segments); 3756 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3757 sc->tx_data[i].tso_ctx_reused); 3758 } 3759 } 3760 3761 static void 3762 emx_print_hw_stats(struct emx_softc *sc) 3763 { 3764 device_t dev = sc->dev; 3765 3766 device_printf(dev, "Excessive collisions = %lld\n", 3767 (long long)sc->stats.ecol); 3768 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3769 device_printf(dev, "Symbol errors = %lld\n", 3770 (long long)sc->stats.symerrs); 3771 #endif 3772 device_printf(dev, "Sequence errors = %lld\n", 3773 (long long)sc->stats.sec); 3774 device_printf(dev, "Defer count = %lld\n", 3775 (long long)sc->stats.dc); 3776 device_printf(dev, "Missed Packets = %lld\n", 3777 (long long)sc->stats.mpc); 3778 device_printf(dev, "Receive No Buffers = %lld\n", 3779 (long long)sc->stats.rnbc); 3780 /* RLEC is inaccurate on some hardware, calculate our own. */ 3781 device_printf(dev, "Receive Length Errors = %lld\n", 3782 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3783 device_printf(dev, "Receive errors = %lld\n", 3784 (long long)sc->stats.rxerrc); 3785 device_printf(dev, "Crc errors = %lld\n", 3786 (long long)sc->stats.crcerrs); 3787 device_printf(dev, "Alignment errors = %lld\n", 3788 (long long)sc->stats.algnerrc); 3789 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3790 (long long)sc->stats.cexterr); 3791 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3792 device_printf(dev, "XON Rcvd = %lld\n", 3793 (long long)sc->stats.xonrxc); 3794 device_printf(dev, "XON Xmtd = %lld\n", 3795 (long long)sc->stats.xontxc); 3796 device_printf(dev, "XOFF Rcvd = %lld\n", 3797 (long long)sc->stats.xoffrxc); 3798 device_printf(dev, "XOFF Xmtd = %lld\n", 3799 (long long)sc->stats.xofftxc); 3800 device_printf(dev, "Good Packets Rcvd = %lld\n", 3801 (long long)sc->stats.gprc); 3802 device_printf(dev, "Good Packets Xmtd = %lld\n", 3803 (long long)sc->stats.gptc); 3804 } 3805 3806 static void 3807 emx_print_nvm_info(struct emx_softc *sc) 3808 { 3809 uint16_t eeprom_data; 3810 int i, j, row = 0; 3811 3812 /* Its a bit crude, but it gets the job done */ 3813 kprintf("\nInterface EEPROM Dump:\n"); 3814 kprintf("Offset\n0x0000 "); 3815 for (i = 0, j = 0; i < 32; i++, j++) { 3816 if (j == 8) { /* Make the offset block */ 3817 j = 0; ++row; 3818 kprintf("\n0x00%x0 ",row); 3819 } 3820 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3821 kprintf("%04x ", eeprom_data); 3822 } 3823 kprintf("\n"); 3824 } 3825 3826 static int 3827 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3828 { 3829 struct emx_softc *sc; 3830 struct ifnet *ifp; 3831 int error, result; 3832 3833 result = -1; 3834 error = sysctl_handle_int(oidp, &result, 0, req); 3835 if (error || !req->newptr) 3836 return (error); 3837 3838 sc = (struct emx_softc *)arg1; 3839 ifp = &sc->arpcom.ac_if; 3840 3841 ifnet_serialize_all(ifp); 3842 3843 if (result == 1) 3844 emx_print_debug_info(sc); 3845 3846 /* 3847 * This value will cause a hex dump of the 3848 * first 32 16-bit words of the EEPROM to 3849 * the screen. 
static int
emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc;
	struct ifnet *ifp;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	sc = (struct emx_softc *)arg1;
	ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	if (result == 1)
		emx_print_debug_info(sc);

	/*
	 * This value will cause a hex dump of the
	 * first 32 16-bit words of the EEPROM to
	 * the screen.
	 */
	if (result == 2)
		emx_print_nvm_info(sc);

	ifnet_deserialize_all(ifp);

	return (error);
}

static int
emx_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		struct emx_softc *sc = (struct emx_softc *)arg1;
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		emx_print_hw_stats(sc);
		ifnet_deserialize_all(ifp);
	}
	return (error);
}

static void
emx_add_sysctl(struct emx_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char pkt_desc[32];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0,
	    "# of TX descs");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_int_throttle, "I", "interrupt throttling rate");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_wreg_nsegs, "I",
	    "# segments sent before write to hardware register");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0,
	    "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0,
	    "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "TX polling CPU map");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "RX polling CPU map");
#endif

#ifdef EMX_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug,
	    0, "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts,
		    "RXed packets");
	}
#endif
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
#ifdef EMX_TSS_DEBUG
		ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts,
		    "TXed packets");
#endif
		ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_nmbuf", i);
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RD, &sc->tx_data[i].tx_nmbuf, 0,
		    "# of pending TX mbufs");
		ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_gc", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_gc,
		    "# of TX desc GC");
	}
}
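
/*
 * Usage sketch for the nodes registered above (assuming unit 0 and the
 * stock dev.<driver>.<unit> sysctl layout):
 *
 *	sysctl dev.emx.0.debug=1	- dump register and ring state
 *	sysctl dev.emx.0.debug=2	- dump the first 32 EEPROM words
 *	sysctl dev.emx.0.stats=1	- dump MAC statistics
 *
 * The debug and stats handlers act only on writes; reading the nodes
 * just returns -1.
 */
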
"TXed packets"); 3954 #endif 3955 3956 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_nmbuf", i); 3957 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3958 pkt_desc, CTLFLAG_RD, &sc->tx_data[i].tx_nmbuf, 0, 3959 "# of pending TX mbufs"); 3960 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_gc", i); 3961 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3962 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_gc, 3963 "# of TX desc GC"); 3964 } 3965 } 3966 3967 static int 3968 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3969 { 3970 struct emx_softc *sc = (void *)arg1; 3971 struct ifnet *ifp = &sc->arpcom.ac_if; 3972 int error, throttle; 3973 3974 throttle = sc->int_throttle_ceil; 3975 error = sysctl_handle_int(oidp, &throttle, 0, req); 3976 if (error || req->newptr == NULL) 3977 return error; 3978 if (throttle < 0 || throttle > 1000000000 / 256) 3979 return EINVAL; 3980 3981 if (throttle) { 3982 /* 3983 * Set the interrupt throttling rate in 256ns increments, 3984 * recalculate sysctl value assignment to get exact frequency. 3985 */ 3986 throttle = 1000000000 / 256 / throttle; 3987 3988 /* Upper 16bits of ITR is reserved and should be zero */ 3989 if (throttle & 0xffff0000) 3990 return EINVAL; 3991 } 3992 3993 ifnet_serialize_all(ifp); 3994 3995 if (throttle) 3996 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3997 else 3998 sc->int_throttle_ceil = 0; 3999 4000 if (ifp->if_flags & IFF_RUNNING) 4001 emx_set_itr(sc, throttle); 4002 4003 ifnet_deserialize_all(ifp); 4004 4005 if (bootverbose) { 4006 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 4007 sc->int_throttle_ceil); 4008 } 4009 return 0; 4010 } 4011 4012 static int 4013 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 4014 { 4015 struct emx_softc *sc = (void *)arg1; 4016 struct ifnet *ifp = &sc->arpcom.ac_if; 4017 struct emx_txdata *tdata = &sc->tx_data[0]; 4018 int error, segs; 4019 4020 segs = tdata->tx_intr_nsegs; 4021 error = sysctl_handle_int(oidp, &segs, 0, req); 4022 if (error || req->newptr == NULL) 4023 return error; 4024 if (segs <= 0) 4025 return EINVAL; 4026 4027 ifnet_serialize_all(ifp); 4028 4029 /* 4030 * Don't allow tx_intr_nsegs to become: 4031 * o Less the oact_tx_desc 4032 * o Too large that no TX desc will cause TX interrupt to 4033 * be generated (OACTIVE will never recover) 4034 * o Too small that will cause tx_dd[] overflow 4035 */ 4036 if (segs < tdata->oact_tx_desc || 4037 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 4038 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 4039 error = EINVAL; 4040 } else { 4041 int i; 4042 4043 error = 0; 4044 for (i = 0; i < sc->tx_ring_cnt; ++i) 4045 sc->tx_data[i].tx_intr_nsegs = segs; 4046 } 4047 4048 ifnet_deserialize_all(ifp); 4049 4050 return error; 4051 } 4052 4053 static int 4054 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 4055 { 4056 struct emx_softc *sc = (void *)arg1; 4057 struct ifnet *ifp = &sc->arpcom.ac_if; 4058 int error, nsegs, i; 4059 4060 nsegs = sc->tx_data[0].tx_wreg_nsegs; 4061 error = sysctl_handle_int(oidp, &nsegs, 0, req); 4062 if (error || req->newptr == NULL) 4063 return error; 4064 4065 ifnet_serialize_all(ifp); 4066 for (i = 0; i < sc->tx_ring_cnt; ++i) 4067 sc->tx_data[i].tx_wreg_nsegs =nsegs; 4068 ifnet_deserialize_all(ifp); 4069 4070 return 0; 4071 } 4072 4073 static int 4074 emx_dma_alloc(struct emx_softc *sc) 4075 { 4076 int error, i; 4077 4078 /* 4079 * Create top level busdma tag 4080 */ 4081 error = bus_dma_tag_create(NULL, 1, 0, 4082 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4083 NULL, NULL, 4084 BUS_SPACE_MAXSIZE_32BIT, 0, 
static void
emx_dma_free(struct emx_softc *sc)
{
	int i;

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		emx_destroy_tx_ring(&sc->tx_data[i],
		    sc->tx_data[i].num_tx_desc);
	}

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		emx_destroy_rx_ring(&sc->rx_data[i],
		    sc->rx_data[i].num_rx_desc);
	}

	/* Free top level busdma tag */
	if (sc->parent_dtag != NULL)
		bus_dma_tag_destroy(sc->parent_dtag);
}

static void
emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
}

static int
emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_serialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
}

static void
emx_deserialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
}

#ifdef INVARIANTS

static void
emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
	    slz, serialized);
}

#endif	/* INVARIANTS */

#ifdef IFPOLL_ENABLE

static void
emx_npoll_status(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);
		callout_reset(&sc->timer, hz, emx_timer, sc);
	}
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct emx_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->tx_serialize);

	emx_tx_intr(tdata);
	emx_try_txgc(tdata, 1);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct emx_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->rx_serialize);

	emx_rxeof(rdata, cycle);
}
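
/*
 * Note on the polling handlers above: emx_npoll_tx() ignores its cycle
 * argument and cleans the ring to completion, while emx_npoll_rx()
 * passes the cycle count through to emx_rxeof() as a packet budget (the
 * interrupt path drives the same function without a budget).  Each
 * handler asserts only its own per-ring serializer, which is what lets
 * TX and RX rings be polled on different CPUs concurrently.
 */
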
static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct emx_softc *sc = ifp->if_softc;
	int i, txr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int cpu;

		info->ifpi_status.status_func = emx_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = emx_get_txring_inuse(sc, TRUE);
		for (i = 0; i < txr_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_tx[cpu].poll_func = emx_npoll_tx;
			info->ifpi_tx[cpu].arg = tdata;
			info->ifpi_tx[cpu].serializer = &tdata->tx_serialize;
			ifsq_set_cpuid(tdata->ifsq, cpu);
		}

		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct emx_rxdata *rdata = &sc->rx_data[i];

			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_rx[cpu].poll_func = emx_npoll_rx;
			info->ifpi_rx[cpu].arg = rdata;
			info->ifpi_rx[cpu].serializer = &rdata->rx_serialize;
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			ifsq_set_cpuid(tdata->ifsq,
			    rman_get_cpuid(sc->intr_res));
		}
	}
	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);
}

#endif	/* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
	E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
	if (sc->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When using MSI-X interrupts we need to
		 * throttle using the EITR registers
		 */
		for (i = 0; i < 4; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
	}
}

/*
 * Disable the L0s, 82574L Errata #20
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = sc->dev;

	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8 disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
		/*
		 * 82574 specification update errata #20
		 *
		 * There is no need to disable L1
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}

static int
emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
{
	int iphlen, hoff, thoff, ex = 0;
	struct mbuf *m;
	struct ip *ip;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
		ex = 4;

	if (m->m_len < hoff + iphlen + thoff + ex) {
		m = m_pullup(m, hoff + iphlen + thoff + ex);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	ip->ip_len = 0;

	return 0;
}
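
/*
 * Worked example for emx_tso_pullup() above: for a plain TCP/IPv4 frame
 * hoff(14) + iphlen(20) + thoff(20) = 54 contiguous bytes are required,
 * or 58 when EMX_TXFLAG_TSO_PULLEX adds its four extra bytes, so the
 * whole header stack ends up in the first mbuf.  ip_len is zeroed
 * because the hardware fills in the IP total length of each segment it
 * generates.
 */
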
static int
emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int hoff, iphlen, thoff, hlen;
	int mss, pktlen, curr_txd;

#ifdef EMX_TSO_DEBUG
	tdata->tso_segments++;
#endif

	iphlen = mp->m_pkthdr.csum_iphlen;
	thoff = mp->m_pkthdr.csum_thlen;
	hoff = mp->m_pkthdr.csum_lhlen;
	mss = mp->m_pkthdr.tso_segsz;
	pktlen = mp->m_pkthdr.len;

	/* Reuse the last TSO context if the packet layout is unchanged */
	if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
	    tdata->csum_flags == CSUM_TSO &&
	    tdata->csum_iphlen == iphlen &&
	    tdata->csum_lhlen == hoff &&
	    tdata->csum_thlen == thoff &&
	    tdata->csum_mss == mss &&
	    tdata->csum_pktlen == pktlen) {
		*txd_upper = tdata->csum_txd_upper;
		*txd_lower = tdata->csum_txd_lower;
#ifdef EMX_TSO_DEBUG
		tdata->tso_ctx_reused++;
#endif
		return 0;
	}
	hlen = hoff + iphlen + thoff;

	/*
	 * Setup a new TSO context.
	 */

	curr_txd = tdata->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
		     E1000_TXD_DTYP_D |		/* Data descr type */
		     E1000_TXD_CMD_TSE;		/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

	/*
	 * Start offset for header checksum calculation.
	 * End offset for header checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->lower_setup.ip_fields.ipcss = hoff;
	TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
	TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);

	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    hoff + iphlen + offsetof(struct tcphdr, th_sum);

	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mss);
	TXD->tcp_seg_setup.fields.hdr_len = hlen;
	TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
	    E1000_TXD_CMD_DEXT |	/* Extended descr */
	    E1000_TXD_CMD_TSE |		/* TSE context */
	    E1000_TXD_CMD_IP |		/* Do IP csum */
	    E1000_TXD_CMD_TCP |		/* Do TCP checksum */
	    (pktlen - hlen));		/* Total len */

	/* Save the information for this TSO context */
	tdata->csum_flags = CSUM_TSO;
	tdata->csum_lhlen = hoff;
	tdata->csum_iphlen = iphlen;
	tdata->csum_thlen = thoff;
	tdata->csum_mss = mss;
	tdata->csum_pktlen = pktlen;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}
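
/*
 * Worked example for the context cache above: a bulk sender pushing
 * equally sized TSO packets down one TCP connection produces identical
 * hoff/iphlen/thoff/mss/pktlen tuples, so after the first packet lays
 * down a context descriptor every subsequent packet takes the early
 * return and saves one descriptor per packet.  pktlen participates in
 * the comparison because cmd_and_length embeds the payload length
 * (pktlen - hlen).
 */
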
static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
	if (polling)
		return sc->tx_ring_cnt;
	else
		return 1;
}

/*
 * Remove all descriptors from the TX ring.
 *
 * We want to clear all pending descriptors from the TX ring.  Zeroing
 * happens when the HW reads the regs.  We assign the ring itself as
 * the data of the next descriptor.  We don't care about the data,
 * since we are about to reset the HW.
 */
static void
emx_flush_tx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);

	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];
		struct e1000_tx_desc *txd;

		if (E1000_READ_REG(hw, E1000_TDLEN(i)) == 0)
			continue;

		txd = &tdata->tx_desc_base[tdata->next_avail_tx_desc++];
		if (tdata->next_avail_tx_desc == tdata->num_tx_desc)
			tdata->next_avail_tx_desc = 0;

		/* Just use the ring as a dummy buffer addr */
		txd->buffer_addr = tdata->tx_desc_paddr;
		txd->lower.data = htole32(E1000_TXD_CMD_IFCS | 512);
		txd->upper.data = 0;

		E1000_WRITE_REG(hw, E1000_TDT(i), tdata->next_avail_tx_desc);
		usec_delay(250);
	}
}

/*
 * Remove all descriptors from the RX rings.
 *
 * Mark all descriptors in the RX rings as consumed and disable the RX rings.
 */
static void
emx_flush_rx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t rctl;
	int i;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	usec_delay(150);

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		uint32_t rxdctl;

		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		/* Zero the lower 14 bits (prefetch and host thresholds) */
		rxdctl &= 0xffffc000;
		/*
		 * Update thresholds: prefetch threshold to 31, host threshold
		 * to 1 and make sure the granularity is "descriptors" and not
		 * "cache lines".
		 */
		rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	/* Momentarily enable the RX rings for the changes to take effect */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	usec_delay(150);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
}
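
/*
 * Bit layout behind the RXDCTL write above, per the e1000 register
 * definitions:
 *
 *	bits  5:0   PTHRESH, prefetch threshold, set to 0x1f (31)
 *	bits 13:8   HTHRESH, host threshold, set to 1 (the 1 << 8)
 *	bit  24     threshold granularity, descriptors instead of
 *	            cache lines (E1000_RXDCTL_THRESH_UNIT_DESC)
 *
 * The preceding mask with 0xffffc000 clears bits 13:0 so both threshold
 * fields start from zero before being OR'ed in.
 */
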
/*
 * Remove all descriptors from the descriptor rings.
 *
 * On i219, the descriptor rings must be emptied before resetting the HW
 * or before changing the device state to D3 during runtime (runtime PM).
 *
 * Failure to do this will cause the HW to enter a unit hang state which
 * can only be released by a PCI reset on the device.
 */
static void
emx_flush_txrx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	uint16_t hang_state;
	uint32_t fext_nvm11, tdlen;
	int i;

	/*
	 * First, disable MULR fix in FEXTNVM11.
	 */
	fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
	fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
	E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);

	/*
	 * Do nothing if we're not in the faulty state, or if the TX
	 * queues are empty.
	 */
	tdlen = 0;
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		tdlen += E1000_READ_REG(hw, E1000_TDLEN(i));
	hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
	if ((hang_state & EMX_FLUSH_DESC_REQUIRED) && tdlen)
		emx_flush_tx_ring(sc);

	/*
	 * Recheck, maybe the fault is caused by the RX ring.
	 */
	hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
	if (hang_state & EMX_FLUSH_DESC_REQUIRED)
		emx_flush_rx_ring(sc);
}
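
/*
 * Note on the sequence above: the descriptor-ring status word is read
 * twice on purpose.  Flushing the TX ring may already clear the
 * EMX_FLUSH_DESC_REQUIRED condition, in which case the RX flush (which
 * briefly re-enables the RX unit) is skipped entirely.
 */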