/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }
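
/*
 * For illustration: EMX_DEVICE(82574L) expands to
 *
 *	{ EMX_VENDOR_ID, E1000_DEV_ID_82574L, "Intel(R) PRO/1000 82574L" }
 *
 * i.e. the PCI vendor/device ID pair plus a description string pasted
 * together from EMX_NAME and the stringified token.
 */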

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),
	EMX_DEVICE(PCH_I218_LM2),
	EMX_DEVICE(PCH_I218_V2),
	EMX_DEVICE(PCH_I218_LM3),
	EMX_DEVICE(PCH_I218_V3),
	EMX_DEVICE(PCH_SPT_I219_LM),
	EMX_DEVICE(PCH_SPT_I219_V),
	EMX_DEVICE(PCH_SPT_I219_LM2),
	EMX_DEVICE(PCH_SPT_I219_V2),
	EMX_DEVICE(PCH_LBG_I219_LM3),
	EMX_DEVICE(PCH_SPT_I219_LM4),
	EMX_DEVICE(PCH_SPT_I219_V4),
	EMX_DEVICE(PCH_SPT_I219_LM5),
	EMX_DEVICE(PCH_SPT_I219_V5),
	EMX_DEVICE(PCH_CNP_I219_LM6),
	EMX_DEVICE(PCH_CNP_I219_V6),
	EMX_DEVICE(PCH_CNP_I219_LM7),
	EMX_DEVICE(PCH_CNP_I219_V7),
	EMX_DEVICE(PCH_CNP_I219_LM8),
	EMX_DEVICE(PCH_CNP_I219_V8),
	EMX_DEVICE(PCH_CNP_I219_LM9),
	EMX_DEVICE(PCH_CNP_I219_V9),
	EMX_DEVICE(PCH_CNP_I219_LM10),
	EMX_DEVICE(PCH_CNP_I219_V10),
	EMX_DEVICE(PCH_CNP_I219_LM11),
	EMX_DEVICE(PCH_CNP_I219_V11),
	EMX_DEVICE(PCH_CNP_I219_LM12),
	EMX_DEVICE(PCH_CNP_I219_V12),
	EMX_DEVICE(PCH_CNP_I219_LM13),
	EMX_DEVICE(PCH_CNP_I219_V13),
	EMX_DEVICE(PCH_CNP_I219_LM14),
	EMX_DEVICE(PCH_CNP_I219_V14),

	/* required last entry */
	EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *, boolean_t);
static void	emx_txgc_timer(void *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);
static void	emx_flush_tx_ring(struct emx_softc *);
static void	emx_flush_rx_ring(struct emx_softc *);
static void	emx_flush_txrx_ring(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

static char	emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_free_txbuf(struct emx_txdata *tdata, struct emx_txbuf *tx_buffer)
{

	KKASSERT(tx_buffer->m_head != NULL);
	KKASSERT(tdata->tx_nmbuf > 0);
	tdata->tx_nmbuf--;

	bus_dmamap_unload(tdata->txtag, tx_buffer->map);
	m_freem(tx_buffer->m_head);
	tx_buffer->m_head = NULL;
}

static __inline void
emx_tx_intr(struct emx_txdata *tdata)
{

	emx_txeof(tdata);
	if (!ifsq_is_empty(tdata->ifsq))
		ifsq_devstart(tdata->ifsq);
}

static __inline void
emx_try_txgc(struct emx_txdata *tdata, int16_t dec)
{

	if (tdata->tx_running > 0) {
		tdata->tx_running -= dec;
		if (tdata->tx_running <= 0 && tdata->tx_nmbuf &&
		    tdata->num_tx_desc_avail < tdata->num_tx_desc &&
		    tdata->num_tx_desc_avail + tdata->tx_intr_nsegs >
		    tdata->num_tx_desc)
			emx_tx_collect(tdata, TRUE);
	}
}
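
/*
 * NOTE: the garbage-collect condition in emx_try_txgc() above is
 * deliberately narrow.  num_tx_desc_avail + tx_intr_nsegs > num_tx_desc
 * is just "fewer than tx_intr_nsegs descriptors are outstanding":
 * emx_encap() only requests a Report Status writeback every
 * tx_intr_nsegs descriptors, so such a small tail may never raise a
 * TX completion interrupt and the timer-driven collect is what
 * reclaims its mbufs.
 */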

static void
emx_txgc_timer(void *xtdata)
{
	struct emx_txdata *tdata = xtdata;
	struct ifnet *ifp = &tdata->sc->arpcom.ac_if;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP))
		return;

	if (!lwkt_serialize_try(&tdata->tx_serialize))
		goto done;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP)) {
		lwkt_serialize_exit(&tdata->tx_serialize);
		return;
	}
	emx_try_txgc(tdata, EMX_TX_RUNNING_DEC);

	lwkt_serialize_exit(&tdata->tx_serialize);
done:
	callout_reset(&tdata->tx_gc_timer, 1, emx_txgc_timer, tdata);
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}
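
/*
 * A note on the EMX_RXDMRQ_IPV4 case above: TCP over IPv4 would have
 * matched the IPV4_TCP RSS type already, so an IPv4 packet whose
 * TCP/UDP checksum was actually verified (TCPCS set without TCPE) is
 * presumably UDP, hence pi_l3proto is set to IPPROTO_UDP.  Returning
 * NULL means no trustworthy packet info could be extracted; the frame
 * then takes the ordinary, non-RSS input path.
 */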

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable;
	int tx_ring_max, ring_cnt;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
	char flowctrl[IFM_ETH_FC_STRLEN];

	/*
	 * Setup RX rings
	 */
	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	/*
	 * Setup TX rings
	 */
	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
		callout_init_mp(&sc->tx_data[i].tx_gc_timer);
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);
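
	/*
	 * The fixed order of sc->serializes[] (main serializer first,
	 * then all TX, then all RX) presumably mirrors the order in
	 * which emx_serialize() acquires them for IFNET_SERIALIZE_ALL;
	 * a single global acquisition order is what keeps ALL-type
	 * serialization deadlock free.
	 */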

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;
again:
	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: %s\n",
		    sc->intr_type == PCI_INTR_TYPE_MSI ? "MSI" : "legacy intr");
		if (!msi_enable) {
			/* Retry with MSI. */
			msi_enable = 1;
			sc->flags &= ~EMX_FLAG_SHARED_INTR;
			goto again;
		}
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	} else if (sc->hw.mac.type >= e1000_pch_spt) {
		/*
		 * In the new SPT device flash is not a separate BAR;
		 * rather it is also in BAR0, so use the same tag and
		 * an offset handle for the FLASH read/write macros
		 * in the shared code.
		 */
		sc->osdep.flash_bus_space_tag = sc->osdep.mem_bus_space_tag;
		sc->osdep.flash_bus_space_handle =
		    sc->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
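
	/*
	 * Worked example for the conversion above, assuming the tunable
	 * is a rate in interrupts/second (e.g. 10000): the ITR register
	 * counts in 256ns units, so
	 *
	 *	throttle = 10^9 / 256 / 10000 = 390 register units (~100us)
	 *	int_throttle_ceil = 10^9 / 256 / 390 = 10016 ints/s
	 *
	 * i.e. int_throttle_ceil ends up as the exact rate the hardware
	 * will honor after the integer round-trip.
	 */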

	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/*
	 * Calculate # of RX/TX rings
	 */
	ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, EMX_NRX_RING);

	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan ||
	    sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_pch_spt ||
	    sc->hw.mac.type == e1000_pch_cnp ||
	    sc->hw.mac.type == e1000_82574)
		tx_ring_max = EMX_NTX_RING;
	ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, tx_ring_max);

	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);
	sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state; this is important for reading
	 * the NVM and the MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		apme_mask = E1000_WUC_APME;
		sc->flags |= EMX_FLAG_HAS_AMT;
		eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC);
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port sc, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}
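
	/*
	 * NOTE: the assignment below unconditionally overrides all of
	 * the WOL configuration computed above; wake-on-lan is
	 * effectively force-disabled in this driver for now.
	 */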

	/* XXX disable wol */
	sc->wol = 0;

	/* Initialize the # of TX rings to use. */
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    emx_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree, must be after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset; call it again,
		 * if it fails a second time it's a real issue.
		 */
		error = emx_reset(sc);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(&sc->arpcom.ac_if);
			goto fail;
		}
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}
	return (0);
fail:
	emx_detach(dev);
	return (error);
}

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
		    sc->flash);
	}

	emx_dma_free(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	if (sc->rx_rmap != NULL)
		if_ringmap_free(sc->rx_rmap);
	if (sc->tx_rmap != NULL)
		if_ringmap_free(sc->tx_rmap);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Do we at least have the minimal number of descriptors? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata, FALSE);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata, FALSE);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time would make the opackets
		 * statistics vastly inaccurate; we do the opackets increment
		 * now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
	tdata->tx_running = EMX_TX_RUNNING;
}
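
/*
 * A note on the TDT handling in emx_start() above: writing the tail
 * register is what actually hands new descriptors to the hardware and
 * is a comparatively expensive register access, so it is batched.  idx
 * tracks the last descriptor set up by emx_encap() and TDT is only
 * bumped every tx_wreg_nsegs segments, plus once after the loop for
 * any remainder.
 */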

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_pch_cnp:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */

	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		tdata->tx_watchdog.wd_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest mac address; the user can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571, RAR[0] may be overwritten when the other
	 * port is reset; we make a duplicate in RAR[14] for that
	 * eventuality, which assures the interface continues to
	 * function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	/* Reset hardware counters */
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling)
		emx_disable_intr(sc);
	else
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_clr_oactive(tdata->ifsq);
		ifsq_watchdog_start(&tdata->tx_watchdog);
		if (!polling) {
			callout_reset_bycpu(&tdata->tx_gc_timer, 1,
			    emx_txgc_timer, tdata, ifsq_get_cpuid(tdata->ifsq));
		}
	}
	callout_reset(&sc->timer, hz, emx_timer, sc);
}

static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle.  The ICR register
	 * reports all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(&sc->rx_data[i], -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			struct emx_txdata *tdata = &sc->tx_data[0];

			lwkt_serialize_enter(&tdata->tx_serialize);
			emx_tx_intr(tdata);
			lwkt_serialize_exit(&tdata->tx_serialize);
		}
	}
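
	/*
	 * NOTE on the serialization below: this handler was installed
	 * with sc->main_serialize (see bus_setup_intr() in emx_attach()),
	 * which is therefore already held here.  emx_serialize_skipmain()
	 * presumably enters only the remaining TX/RX serializers, giving
	 * the fully serialized context the link-state update requires
	 * without recursing on the main serializer.
	 */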

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	if (ifmr->ifm_active & IFM_FDX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}

static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);

	return (0);
}

static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = emx_tso_pullup(tdata, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index, this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = tdata->next_avail_tx_desc;
	tx_buffer = &tdata->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	tdata->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

	i = tdata->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &tdata->tx_buf[i];
		ctxd = &tdata->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == tdata->num_tx_desc)
			i = 0;
	}

	tdata->next_avail_tx_desc = i;

	KKASSERT(tdata->num_tx_desc_avail > nsegs);
	tdata->num_tx_desc_avail -= nsegs;
	tdata->tx_nmbuf++;

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
		tdata->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_intr_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		tdata->tx_dd[tdata->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
		KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Defer TDT updating, until enough descriptors are setup
	 */
	*idx = i;

#ifdef EMX_TSS_DEBUG
	tdata->tx_pkts++;
#endif

	return (0);
}
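
/*
 * A note on the RS/tx_dd bookkeeping in emx_encap() above: Report
 * Status is only requested every tx_intr_nsegs descriptors, so DONE
 * writebacks (and thus TX completion work) are batched instead of
 * being per-packet.  tx_dd[] remembers exactly which descriptor
 * indices carry RS; emx_txeof() then presumably only has to check the
 * DD bit at those indices to reclaim everything queued before them.
 */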

static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;
	int mcnt = 0;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
	reg_rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);

	if (ifp->if_flags & IFF_ALLMULTI) {
		mcnt = EMX_MCAST_ADDR_MAX;
	} else {
		const struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == EMX_MCAST_ADDR_MAX)
				break;
			mcnt++;
		}
	}
	/* Don't disable if in MAX groups */
	if (mcnt < EMX_MCAST_ADDR_MAX)
		reg_rctl &= ~E1000_RCTL_MPE;

	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(&sc->main_serialize);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			if (hw->mac.type >= e1000_pch_spt)
				msec_delay(50);
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571EB/82572EI
		 */
		if (sc->link_speed != SPEED_1000 &&
		    (hw->mac.type == e1000_82571 ||
		     hw->mac.type == e1000_82572)) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));
			device_printf(dev, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    (sc->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex",
			    flowctrl);
		}
		if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
			e1000_force_flowctrl(hw, sc->ifm_flowctrl);
		sc->link_active = 1;
		sc->smartspeed = 0;
		ifp->if_baudrate = sc->link_speed * 1000000;
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		sc->link_active = 0;
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
emx_stop(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_clr_oactive(tdata->ifsq);
		ifsq_watchdog_stop(&tdata->tx_watchdog);
		tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;

		tdata->tx_running = 0;
		callout_stop(&tdata->tx_gc_timer);
	}

	/* I219 needs some special flushing to avoid hangs */
	if (sc->hw.mac.type >= e1000_pch_spt)
		emx_flush_txrx_ring(sc);

	/*
	 * Disable multiple receive queues.
	 *
	 * NOTE:
	 * We should disable multiple receive queues before
	 * resetting the hardware.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		emx_free_tx_ring(&sc->tx_data[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		emx_free_rx_ring(&sc->rx_data[i]);
}
1989 "Full Duplex" : "Half Duplex",
1990 flowctrl);
1991 }
1992 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
1993 e1000_force_flowctrl(hw, sc->ifm_flowctrl);
1994 sc->link_active = 1;
1995 sc->smartspeed = 0;
1996 ifp->if_baudrate = sc->link_speed * 1000000;
1997 ifp->if_link_state = LINK_STATE_UP;
1998 if_link_state_change(ifp);
1999 } else if (!link_check && sc->link_active == 1) {
2000 ifp->if_baudrate = sc->link_speed = 0;
2001 sc->link_duplex = 0;
2002 if (bootverbose)
2003 device_printf(dev, "Link is Down\n");
2004 sc->link_active = 0;
2005 ifp->if_link_state = LINK_STATE_DOWN;
2006 if_link_state_change(ifp);
2007 }
2008 }
2009
2010 static void
2011 emx_stop(struct emx_softc *sc)
2012 {
2013 struct ifnet *ifp = &sc->arpcom.ac_if;
2014 int i;
2015
2016 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2017
2018 emx_disable_intr(sc);
2019
2020 callout_stop(&sc->timer);
2021
2022 ifp->if_flags &= ~IFF_RUNNING;
2023 for (i = 0; i < sc->tx_ring_cnt; ++i) {
2024 struct emx_txdata *tdata = &sc->tx_data[i];
2025
2026 ifsq_clr_oactive(tdata->ifsq);
2027 ifsq_watchdog_stop(&tdata->tx_watchdog);
2028 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;
2029
2030 tdata->tx_running = 0;
2031 callout_stop(&tdata->tx_gc_timer);
2032 }
2033
2034 /* I219 needs some special flushing to avoid hangs */
2035 if (sc->hw.mac.type >= e1000_pch_spt)
2036 emx_flush_txrx_ring(sc);
2037
2038 /*
2039 * Disable multiple receive queues.
2040 *
2041 * NOTE:
2042 * We should disable multiple receive queues before
2043 * resetting the hardware.
2044 */
2045 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);
2046
2047 e1000_reset_hw(&sc->hw);
2048 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
2049
2050 for (i = 0; i < sc->tx_ring_cnt; ++i)
2051 emx_free_tx_ring(&sc->tx_data[i]);
2052 for (i = 0; i < sc->rx_ring_cnt; ++i)
2053 emx_free_rx_ring(&sc->rx_data[i]);
2054 }
2055
2056 static int
2057 emx_reset(struct emx_softc *sc)
2058 {
2059 device_t dev = sc->dev;
2060 uint16_t rx_buffer_size;
2061 uint32_t pba;
2062
2063 /* Set up smart power down as default off on newer adapters. */
2064 if (!emx_smart_pwr_down &&
2065 (sc->hw.mac.type == e1000_82571 ||
2066 sc->hw.mac.type == e1000_82572)) {
2067 uint16_t phy_tmp = 0;
2068
2069 /* Speed up time to link by disabling smart power down. */
2070 e1000_read_phy_reg(&sc->hw,
2071 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
2072 phy_tmp &= ~IGP02E1000_PM_SPD;
2073 e1000_write_phy_reg(&sc->hw,
2074 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
2075 }
2076
2077 /*
2078 * Packet Buffer Allocation (PBA)
2079 * Writing PBA sets the receive portion of the buffer;
2080 * the remainder is used for the transmit buffer.
2081 */
2082 switch (sc->hw.mac.type) {
2083 /* Total Packet Buffer on these is 48K */
2084 case e1000_82571:
2085 case e1000_82572:
2086 case e1000_80003es2lan:
2087 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
2088 break;
2089
2090 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
2091 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
2092 break;
2093
2094 case e1000_82574:
2095 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
2096 break;
2097
2098 case e1000_pch_lpt:
2099 case e1000_pch_spt:
2100 case e1000_pch_cnp:
2101 pba = E1000_PBA_26K;
2102 break;
2103
2104 default:
2105 /* Devices before 82547 had a Packet Buffer of 64K.
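 * As a rough sketch of the arithmetic (derived from the inline
 * comments below): with the 64K packet buffer, E1000_PBA_40K leaves
 * 64K - 40K = 24K for TX, while E1000_PBA_48K leaves only 16K,
 * which is why the jumbo-capable (frame > 8192) case reserves the
 * larger TX share.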
*/
2106 if (sc->hw.mac.max_frame_size > 8192)
2107 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
2108 else
2109 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
2110 }
2111 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);
2112
2113 /*
2114 * These parameters control the automatic generation (Tx) and
2115 * response (Rx) to Ethernet PAUSE frames.
2116 * - High water mark should allow for at least two frames to be
2117 * received after sending an XOFF.
2118 * - Low water mark works best when it is very near the high water mark.
2119 * This allows the receiver to restart by sending XON when it has
2120 * drained a bit. Here we use an arbitrary value of 1500 which will
2121 * restart after one full frame is pulled from the buffer. There
2122 * could be several smaller frames in the buffer and if so they will
2123 * not trigger the XON until their total size reduces the buffer
2124 * by 1500.
2125 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2126 */
2127 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;
2128
2129 sc->hw.fc.high_water = rx_buffer_size -
2130 roundup2(sc->hw.mac.max_frame_size, 1024);
2131 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;
2132
2133 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
2134 sc->hw.fc.send_xon = TRUE;
2135 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);
2136
2137 /*
2138 * Device specific overrides/settings
2139 */
2140 if (sc->hw.mac.type == e1000_pch_lpt ||
2141 sc->hw.mac.type == e1000_pch_spt ||
2142 sc->hw.mac.type == e1000_pch_cnp) {
2143 sc->hw.fc.high_water = 0x5C20;
2144 sc->hw.fc.low_water = 0x5048;
2145 sc->hw.fc.pause_time = 0x0650;
2146 sc->hw.fc.refresh_time = 0x0400;
2147 /* Jumbos need adjusted PBA */
2148 if (sc->arpcom.ac_if.if_mtu > ETHERMTU)
2149 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12);
2150 else
2151 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26);
2152 } else if (sc->hw.mac.type == e1000_80003es2lan) {
2153 sc->hw.fc.pause_time = 0xFFFF;
2154 }
2155
2156 /* I219 needs some special flushing to avoid hangs */
2157 if (sc->hw.mac.type >= e1000_pch_spt)
2158 emx_flush_txrx_ring(sc);
2159
2160 /* Issue a global reset */
2161 e1000_reset_hw(&sc->hw);
2162 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
2163 emx_disable_aspm(sc);
2164
2165 if (e1000_init_hw(&sc->hw) < 0) {
2166 device_printf(dev, "Hardware Initialization Failed\n");
2167 return (EIO);
2168 }
2169
2170 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
2171 e1000_get_phy_info(&sc->hw);
2172 e1000_check_for_link(&sc->hw);
2173
2174 return (0);
2175 }
2176
2177 static void
2178 emx_setup_ifp(struct emx_softc *sc)
2179 {
2180 struct ifnet *ifp = &sc->arpcom.ac_if;
2181 int i;
2182
2183 if_initname(ifp, device_get_name(sc->dev),
2184 device_get_unit(sc->dev));
2185 ifp->if_softc = sc;
2186 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2187 ifp->if_init = emx_init;
2188 ifp->if_ioctl = emx_ioctl;
2189 ifp->if_start = emx_start;
2190 #ifdef IFPOLL_ENABLE
2191 ifp->if_npoll = emx_npoll;
2192 #endif
2193 ifp->if_serialize = emx_serialize;
2194 ifp->if_deserialize = emx_deserialize;
2195 ifp->if_tryserialize = emx_tryserialize;
2196 #ifdef INVARIANTS
2197 ifp->if_serialize_assert = emx_serialize_assert;
2198 #endif
2199
2200 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc;
2201
2202 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1);
2203 ifq_set_ready(&ifp->if_snd);
2204 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);
2205
2206 ifp->if_mapsubq = ifq_mapsubq_modulo;
2207 ifq_set_subq_divisor(&ifp->if_snd,
1); 2208 2209 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2210 2211 ifp->if_capabilities = IFCAP_HWCSUM | 2212 IFCAP_VLAN_HWTAGGING | 2213 IFCAP_VLAN_MTU | 2214 IFCAP_TSO; 2215 if (sc->rx_ring_cnt > 1) 2216 ifp->if_capabilities |= IFCAP_RSS; 2217 ifp->if_capenable = ifp->if_capabilities; 2218 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2219 2220 /* 2221 * Tell the upper layer(s) we support long frames. 2222 */ 2223 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2224 2225 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2226 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2227 struct emx_txdata *tdata = &sc->tx_data[i]; 2228 2229 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2230 ifsq_set_priv(ifsq, tdata); 2231 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2232 tdata->ifsq = ifsq; 2233 2234 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2235 } 2236 2237 /* 2238 * Specify the media types supported by this sc and register 2239 * callbacks to update media and link information 2240 */ 2241 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2242 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2243 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2244 0, NULL); 2245 } else { 2246 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2247 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2248 0, NULL); 2249 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2250 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2251 0, NULL); 2252 if (sc->hw.phy.type != e1000_phy_ife) { 2253 ifmedia_add(&sc->media, 2254 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2255 } 2256 } 2257 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2258 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl); 2259 } 2260 2261 /* 2262 * Workaround for SmartSpeed on 82541 and 82547 controllers 2263 */ 2264 static void 2265 emx_smartspeed(struct emx_softc *sc) 2266 { 2267 uint16_t phy_tmp; 2268 2269 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2270 sc->hw.mac.autoneg == 0 || 2271 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2272 return; 2273 2274 if (sc->smartspeed == 0) { 2275 /* 2276 * If Master/Slave config fault is asserted twice, 2277 * we assume back-to-back 2278 */ 2279 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2280 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2281 return; 2282 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2283 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2284 e1000_read_phy_reg(&sc->hw, 2285 PHY_1000T_CTRL, &phy_tmp); 2286 if (phy_tmp & CR_1000T_MS_ENABLE) { 2287 phy_tmp &= ~CR_1000T_MS_ENABLE; 2288 e1000_write_phy_reg(&sc->hw, 2289 PHY_1000T_CTRL, phy_tmp); 2290 sc->smartspeed++; 2291 if (sc->hw.mac.autoneg && 2292 !e1000_phy_setup_autoneg(&sc->hw) && 2293 !e1000_read_phy_reg(&sc->hw, 2294 PHY_CONTROL, &phy_tmp)) { 2295 phy_tmp |= MII_CR_AUTO_NEG_EN | 2296 MII_CR_RESTART_AUTO_NEG; 2297 e1000_write_phy_reg(&sc->hw, 2298 PHY_CONTROL, phy_tmp); 2299 } 2300 } 2301 } 2302 return; 2303 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2304 /* If still no link, perhaps using 2/3 pair cable */ 2305 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2306 phy_tmp |= CR_1000T_MS_ENABLE; 2307 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2308 if (sc->hw.mac.autoneg && 2309 !e1000_phy_setup_autoneg(&sc->hw) && 2310 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2311 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2312 
e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2313 } 2314 } 2315 2316 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2317 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2318 sc->smartspeed = 0; 2319 } 2320 2321 static int 2322 emx_create_tx_ring(struct emx_txdata *tdata) 2323 { 2324 device_t dev = tdata->sc->dev; 2325 struct emx_txbuf *tx_buffer; 2326 int error, i, tsize, ntxd; 2327 2328 /* 2329 * Validate number of transmit descriptors. It must not exceed 2330 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2331 */ 2332 ntxd = device_getenv_int(dev, "txd", emx_txd); 2333 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2334 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2335 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2336 EMX_DEFAULT_TXD, ntxd); 2337 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2338 } else { 2339 tdata->num_tx_desc = ntxd; 2340 } 2341 2342 /* 2343 * Allocate Transmit Descriptor ring 2344 */ 2345 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2346 EMX_DBA_ALIGN); 2347 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2348 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2349 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2350 &tdata->tx_desc_paddr); 2351 if (tdata->tx_desc_base == NULL) { 2352 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2353 return ENOMEM; 2354 } 2355 2356 tsize = __VM_CACHELINE_ALIGN( 2357 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2358 tdata->tx_buf = kmalloc(tsize, M_DEVBUF, 2359 M_WAITOK | M_ZERO | M_CACHEALIGN); 2360 2361 /* 2362 * Create DMA tags for tx buffers 2363 */ 2364 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2365 1, 0, /* alignment, bounds */ 2366 BUS_SPACE_MAXADDR, /* lowaddr */ 2367 BUS_SPACE_MAXADDR, /* highaddr */ 2368 NULL, NULL, /* filter, filterarg */ 2369 EMX_TSO_SIZE, /* maxsize */ 2370 EMX_MAX_SCATTER, /* nsegments */ 2371 EMX_MAX_SEGSIZE, /* maxsegsize */ 2372 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2373 BUS_DMA_ONEBPAGE, /* flags */ 2374 &tdata->txtag); 2375 if (error) { 2376 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2377 kfree(tdata->tx_buf, M_DEVBUF); 2378 tdata->tx_buf = NULL; 2379 return error; 2380 } 2381 2382 /* 2383 * Create DMA maps for tx buffers 2384 */ 2385 for (i = 0; i < tdata->num_tx_desc; i++) { 2386 tx_buffer = &tdata->tx_buf[i]; 2387 2388 error = bus_dmamap_create(tdata->txtag, 2389 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2390 &tx_buffer->map); 2391 if (error) { 2392 device_printf(dev, "Unable to create TX DMA map\n"); 2393 emx_destroy_tx_ring(tdata, i); 2394 return error; 2395 } 2396 } 2397 2398 /* 2399 * Setup TX parameters 2400 */ 2401 tdata->spare_tx_desc = EMX_TX_SPARE; 2402 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2403 2404 /* 2405 * Keep following relationship between spare_tx_desc, oact_tx_desc 2406 * and tx_intr_nsegs: 2407 * (spare_tx_desc + EMX_TX_RESERVED) <= 2408 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2409 */ 2410 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2411 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2412 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2413 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2414 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2415 2416 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2417 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2418 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2419 2420 /* 2421 * Pullup extra 4bytes into the first data segment for TSO, see: 2422 * 82571/82572 specification 
update errata #7 2423 * 2424 * Same applies to I217 (and maybe I218 and I219). 2425 * 2426 * NOTE: 2427 * 4bytes instead of 2bytes, which are mentioned in the errata, 2428 * are pulled; mainly to keep rest of the data properly aligned. 2429 */ 2430 if (tdata->sc->hw.mac.type == e1000_82571 || 2431 tdata->sc->hw.mac.type == e1000_82572 || 2432 tdata->sc->hw.mac.type == e1000_pch_lpt || 2433 tdata->sc->hw.mac.type == e1000_pch_spt || 2434 tdata->sc->hw.mac.type == e1000_pch_cnp) 2435 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2436 2437 return (0); 2438 } 2439 2440 static void 2441 emx_init_tx_ring(struct emx_txdata *tdata) 2442 { 2443 /* Clear the old ring contents */ 2444 bzero(tdata->tx_desc_base, 2445 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2446 2447 /* Reset state */ 2448 tdata->next_avail_tx_desc = 0; 2449 tdata->next_tx_to_clean = 0; 2450 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2451 tdata->tx_nmbuf = 0; 2452 tdata->tx_running = 0; 2453 2454 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2455 if (tdata->sc->tx_ring_inuse > 1) { 2456 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2457 if (bootverbose) { 2458 if_printf(&tdata->sc->arpcom.ac_if, 2459 "TX %d force ctx setup\n", tdata->idx); 2460 } 2461 } 2462 } 2463 2464 static void 2465 emx_init_tx_unit(struct emx_softc *sc) 2466 { 2467 uint32_t tctl, tarc, tipg = 0, txdctl; 2468 int i; 2469 2470 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2471 struct emx_txdata *tdata = &sc->tx_data[i]; 2472 uint64_t bus_addr; 2473 2474 /* Setup the Base and Length of the Tx Descriptor Ring */ 2475 bus_addr = tdata->tx_desc_paddr; 2476 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2477 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2478 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2479 (uint32_t)(bus_addr >> 32)); 2480 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2481 (uint32_t)bus_addr); 2482 /* Setup the HW Tx Head and Tail descriptor pointers */ 2483 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2484 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2485 2486 txdctl = 0x1f; /* PTHRESH */ 2487 txdctl |= 1 << 8; /* HTHRESH */ 2488 txdctl |= 1 << 16; /* WTHRESH */ 2489 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ 2490 txdctl |= E1000_TXDCTL_GRAN; 2491 txdctl |= 1 << 25; /* LWTHRESH */ 2492 2493 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(i), txdctl); 2494 } 2495 2496 /* Set the default values for the Tx Inter Packet Gap timer */ 2497 switch (sc->hw.mac.type) { 2498 case e1000_80003es2lan: 2499 tipg = DEFAULT_82543_TIPG_IPGR1; 2500 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2501 E1000_TIPG_IPGR2_SHIFT; 2502 break; 2503 2504 default: 2505 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2506 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2507 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2508 else 2509 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2510 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2511 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2512 break; 2513 } 2514 2515 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2516 2517 /* NOTE: 0 is not allowed for TIDV */ 2518 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2519 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2520 2521 /* 2522 * Errata workaround (obtained from Linux). This is necessary 2523 * to make multiple TX queues work on 82574. 2524 * XXX can't find it in any published errata though. 
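 * As far as can be told from the code itself, the workaround
 * simply mirrors TXDCTL(0) into TXDCTL(1) below, so the second
 * queue runs with the same prefetch/host/writeback thresholds
 * as the first.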
2525 */ 2526 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0)); 2527 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl); 2528 2529 if (sc->hw.mac.type == e1000_82571 || 2530 sc->hw.mac.type == e1000_82572) { 2531 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2532 tarc |= EMX_TARC_SPEED_MODE; 2533 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2534 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2535 /* errata: program both queues to unweighted RR */ 2536 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2537 tarc |= 1; 2538 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2539 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2540 tarc |= 1; 2541 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2542 } else if (sc->hw.mac.type == e1000_82574) { 2543 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2544 tarc |= EMX_TARC_ERRATA; 2545 if (sc->tx_ring_inuse > 1) { 2546 tarc |= (EMX_TARC_COMPENSATION_MODE | EMX_TARC_MQ_FIX); 2547 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2548 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2549 } else { 2550 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2551 } 2552 } 2553 2554 /* Program the Transmit Control Register */ 2555 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2556 tctl &= ~E1000_TCTL_CT; 2557 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2558 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2559 tctl |= E1000_TCTL_MULR; 2560 2561 /* This write will effectively turn on the transmit unit. */ 2562 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2563 2564 if (sc->hw.mac.type == e1000_82571 || 2565 sc->hw.mac.type == e1000_82572 || 2566 sc->hw.mac.type == e1000_80003es2lan) { 2567 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2568 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2569 tarc &= ~(1 << 28); 2570 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2571 } else if (sc->hw.mac.type >= e1000_pch_spt) { 2572 uint32_t reg; 2573 2574 reg = E1000_READ_REG(&sc->hw, E1000_IOSFPC); 2575 reg |= E1000_RCTL_RDMTS_HEX; 2576 E1000_WRITE_REG(&sc->hw, E1000_IOSFPC, reg); 2577 reg = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2578 reg |= E1000_TARC0_CB_MULTIQ_3_REQ; 2579 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), reg); 2580 } 2581 2582 if (sc->tx_ring_inuse > 1) { 2583 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2584 tarc &= ~EMX_TARC_COUNT_MASK; 2585 tarc |= 1; 2586 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2587 2588 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2589 tarc &= ~EMX_TARC_COUNT_MASK; 2590 tarc |= 1; 2591 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2592 } 2593 } 2594 2595 static void 2596 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc) 2597 { 2598 struct emx_txbuf *tx_buffer; 2599 int i; 2600 2601 /* Free Transmit Descriptor ring */ 2602 if (tdata->tx_desc_base) { 2603 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap); 2604 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base, 2605 tdata->tx_desc_dmap); 2606 bus_dma_tag_destroy(tdata->tx_desc_dtag); 2607 2608 tdata->tx_desc_base = NULL; 2609 } 2610 2611 if (tdata->tx_buf == NULL) 2612 return; 2613 2614 for (i = 0; i < ndesc; i++) { 2615 tx_buffer = &tdata->tx_buf[i]; 2616 2617 KKASSERT(tx_buffer->m_head == NULL); 2618 bus_dmamap_destroy(tdata->txtag, tx_buffer->map); 2619 } 2620 bus_dma_tag_destroy(tdata->txtag); 2621 2622 kfree(tdata->tx_buf, M_DEVBUF); 2623 tdata->tx_buf = NULL; 2624 } 2625 2626 /* 2627 * The offload context needs to be set when we transfer the first 2628 * packet of a particular protocol (TCP/UDP). 
This routine has been
2629 * enhanced to deal with inserted VLAN headers.
2630 *
2631 * If the new packet's ether header length, ip header length and
2632 * csum offloading type are the same as the previous packet, we should
2633 * avoid allocating a new csum context descriptor; mainly to take
2634 * advantage of the pipeline effect of the TX data read request.
2635 *
2636 * This function returns the number of TX descriptors allocated for
2637 * csum context.
2638 */
2639 static int
2640 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
2641 uint32_t *txd_upper, uint32_t *txd_lower)
2642 {
2643 struct e1000_context_desc *TXD;
2644 int curr_txd, ehdrlen, csum_flags;
2645 uint32_t cmd, hdr_len, ip_hlen;
2646
2647 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
2648 ip_hlen = mp->m_pkthdr.csum_iphlen;
2649 ehdrlen = mp->m_pkthdr.csum_lhlen;
2650
2651 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
2652 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
2653 tdata->csum_flags == csum_flags) {
2654 /*
2655 * Same csum offload context as the previous packets;
2656 * just return.
2657 */
2658 *txd_upper = tdata->csum_txd_upper;
2659 *txd_lower = tdata->csum_txd_lower;
2660 return 0;
2661 }
2662
2663 /*
2664 * Setup a new csum offload context.
2665 */
2666
2667 curr_txd = tdata->next_avail_tx_desc;
2668 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
2669
2670 cmd = 0;
2671
2672 /* Setup of IP header checksum. */
2673 if (csum_flags & CSUM_IP) {
2674 /*
2675 * Start offset for header checksum calculation.
2676 * End offset for header checksum calculation.
2677 * Offset of place to put the checksum.
2678 */
2679 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2680 TXD->lower_setup.ip_fields.ipcse =
2681 htole16(ehdrlen + ip_hlen - 1);
2682 TXD->lower_setup.ip_fields.ipcso =
2683 ehdrlen + offsetof(struct ip, ip_sum);
2684 cmd |= E1000_TXD_CMD_IP;
2685 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2686 }
2687 hdr_len = ehdrlen + ip_hlen;
2688
2689 if (csum_flags & CSUM_TCP) {
2690 /*
2691 * Start offset for payload checksum calculation.
2692 * End offset for payload checksum calculation.
2693 * Offset of place to put the checksum.
2694 */
2695 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2696 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2697 TXD->upper_setup.tcp_fields.tucso =
2698 hdr_len + offsetof(struct tcphdr, th_sum);
2699 cmd |= E1000_TXD_CMD_TCP;
2700 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2701 } else if (csum_flags & CSUM_UDP) {
2702 /*
2703 * Start offset for payload checksum calculation.
2704 * End offset for payload checksum calculation.
2705 * Offset of place to put the checksum.
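 * As a worked example (assuming a plain IPv4/UDP frame with a
 * 14-byte Ethernet header and a 20-byte IP header): tucss would
 * be 34, and with uh_sum at offset 6 in struct udphdr, tucso
 * would be 34 + 6 = 40.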
2706 */ 2707 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2708 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2709 TXD->upper_setup.tcp_fields.tucso = 2710 hdr_len + offsetof(struct udphdr, uh_sum); 2711 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2712 } 2713 2714 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2715 E1000_TXD_DTYP_D; /* Data descr */ 2716 2717 /* Save the information for this csum offloading context */ 2718 tdata->csum_lhlen = ehdrlen; 2719 tdata->csum_iphlen = ip_hlen; 2720 tdata->csum_flags = csum_flags; 2721 tdata->csum_txd_upper = *txd_upper; 2722 tdata->csum_txd_lower = *txd_lower; 2723 2724 TXD->tcp_seg_setup.data = htole32(0); 2725 TXD->cmd_and_length = 2726 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2727 2728 if (++curr_txd == tdata->num_tx_desc) 2729 curr_txd = 0; 2730 2731 KKASSERT(tdata->num_tx_desc_avail > 0); 2732 tdata->num_tx_desc_avail--; 2733 2734 tdata->next_avail_tx_desc = curr_txd; 2735 return 1; 2736 } 2737 2738 static void 2739 emx_txeof(struct emx_txdata *tdata) 2740 { 2741 struct emx_txbuf *tx_buffer; 2742 int first, num_avail; 2743 2744 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2745 return; 2746 2747 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2748 return; 2749 2750 num_avail = tdata->num_tx_desc_avail; 2751 first = tdata->next_tx_to_clean; 2752 2753 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2754 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2755 struct e1000_tx_desc *tx_desc; 2756 2757 tx_desc = &tdata->tx_desc_base[dd_idx]; 2758 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2759 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2760 2761 if (++dd_idx == tdata->num_tx_desc) 2762 dd_idx = 0; 2763 2764 while (first != dd_idx) { 2765 logif(pkt_txclean); 2766 2767 KKASSERT(num_avail < tdata->num_tx_desc); 2768 num_avail++; 2769 2770 tx_buffer = &tdata->tx_buf[first]; 2771 if (tx_buffer->m_head) 2772 emx_free_txbuf(tdata, tx_buffer); 2773 2774 if (++first == tdata->num_tx_desc) 2775 first = 0; 2776 } 2777 } else { 2778 break; 2779 } 2780 } 2781 tdata->next_tx_to_clean = first; 2782 tdata->num_tx_desc_avail = num_avail; 2783 2784 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2785 tdata->tx_dd_head = 0; 2786 tdata->tx_dd_tail = 0; 2787 } 2788 2789 if (!EMX_IS_OACTIVE(tdata)) { 2790 ifsq_clr_oactive(tdata->ifsq); 2791 2792 /* All clean, turn off the timer */ 2793 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2794 tdata->tx_watchdog.wd_timer = 0; 2795 } 2796 tdata->tx_running = EMX_TX_RUNNING; 2797 } 2798 2799 static void 2800 emx_tx_collect(struct emx_txdata *tdata, boolean_t gc) 2801 { 2802 struct emx_txbuf *tx_buffer; 2803 int tdh, first, num_avail, dd_idx = -1; 2804 2805 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2806 return; 2807 2808 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx)); 2809 if (tdh == tdata->next_tx_to_clean) { 2810 if (gc && tdata->tx_nmbuf > 0) 2811 tdata->tx_running = EMX_TX_RUNNING; 2812 return; 2813 } 2814 if (gc) 2815 tdata->tx_gc++; 2816 2817 if (tdata->tx_dd_head != tdata->tx_dd_tail) 2818 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2819 2820 num_avail = tdata->num_tx_desc_avail; 2821 first = tdata->next_tx_to_clean; 2822 2823 while (first != tdh) { 2824 logif(pkt_txclean); 2825 2826 KKASSERT(num_avail < tdata->num_tx_desc); 2827 num_avail++; 2828 2829 tx_buffer = &tdata->tx_buf[first]; 2830 if (tx_buffer->m_head) 2831 emx_free_txbuf(tdata, tx_buffer); 2832 2833 if (first == dd_idx) { 2834 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2835 if (tdata->tx_dd_head == 
tdata->tx_dd_tail) {
2836 tdata->tx_dd_head = 0;
2837 tdata->tx_dd_tail = 0;
2838 dd_idx = -1;
2839 } else {
2840 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2841 }
2842 }
2843
2844 if (++first == tdata->num_tx_desc)
2845 first = 0;
2846 }
2847 tdata->next_tx_to_clean = first;
2848 tdata->num_tx_desc_avail = num_avail;
2849
2850 if (!EMX_IS_OACTIVE(tdata)) {
2851 ifsq_clr_oactive(tdata->ifsq);
2852
2853 /* All clean, turn off the timer */
2854 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2855 tdata->tx_watchdog.wd_timer = 0;
2856 }
2857 if (!gc || tdata->tx_nmbuf > 0)
2858 tdata->tx_running = EMX_TX_RUNNING;
2859 }
2860
2861 /*
2862 * When link is lost there is sometimes work still in the TX ring,
2863 * which would result in a watchdog; rather than allow that, do an
2864 * attempted cleanup and then reinit here. Note that this has been
2865 * seen mostly with fiber adapters.
2866 */
2867 static void
2868 emx_tx_purge(struct emx_softc *sc)
2869 {
2870 int i;
2871
2872 if (sc->link_active)
2873 return;
2874
2875 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2876 struct emx_txdata *tdata = &sc->tx_data[i];
2877
2878 if (tdata->tx_watchdog.wd_timer) {
2879 emx_tx_collect(tdata, FALSE);
2880 if (tdata->tx_watchdog.wd_timer) {
2881 if_printf(&sc->arpcom.ac_if,
2882 "Link lost, TX pending, reinit\n");
2883 emx_init(sc);
2884 return;
2885 }
2886 }
2887 }
2888 }
2889
2890 static int
2891 emx_newbuf(struct emx_rxdata *rdata, int i, int init)
2892 {
2893 struct mbuf *m;
2894 bus_dma_segment_t seg;
2895 bus_dmamap_t map;
2896 struct emx_rxbuf *rx_buffer;
2897 int error, nseg;
2898
2899 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
2900 if (m == NULL) {
2901 if (init) {
2902 if_printf(&rdata->sc->arpcom.ac_if,
2903 "Unable to allocate RX mbuf\n");
2904 }
2905 return (ENOBUFS);
2906 }
2907 m->m_len = m->m_pkthdr.len = MCLBYTES;
2908
2909 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN)
2910 m_adj(m, ETHER_ALIGN);
2911
2912 error = bus_dmamap_load_mbuf_segment(rdata->rxtag,
2913 rdata->rx_sparemap, m,
2914 &seg, 1, &nseg, BUS_DMA_NOWAIT);
2915 if (error) {
2916 m_freem(m);
2917 if (init) {
2918 if_printf(&rdata->sc->arpcom.ac_if,
2919 "Unable to load RX mbuf\n");
2920 }
2921 return (error);
2922 }
2923
2924 rx_buffer = &rdata->rx_buf[i];
2925 if (rx_buffer->m_head != NULL)
2926 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2927
2928 map = rx_buffer->map;
2929 rx_buffer->map = rdata->rx_sparemap;
2930 rdata->rx_sparemap = map;
2931
2932 rx_buffer->m_head = m;
2933 rx_buffer->paddr = seg.ds_addr;
2934
2935 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer);
2936 return (0);
2937 }
2938
2939 static int
2940 emx_create_rx_ring(struct emx_rxdata *rdata)
2941 {
2942 device_t dev = rdata->sc->dev;
2943 struct emx_rxbuf *rx_buffer;
2944 int i, error, rsize, nrxd;
2945
2946 /*
2947 * Validate number of receive descriptors. It must not exceed
2948 * hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
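 * (For instance, assuming the usual 16-byte extended RX
 * descriptor and a 128-byte EMX_DBA_ALIGN, nrxd would have to
 * be a multiple of 8 to pass the check below.)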
2949 */ 2950 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2951 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2952 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2953 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2954 EMX_DEFAULT_RXD, nrxd); 2955 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2956 } else { 2957 rdata->num_rx_desc = nrxd; 2958 } 2959 2960 /* 2961 * Allocate Receive Descriptor ring 2962 */ 2963 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2964 EMX_DBA_ALIGN); 2965 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2966 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2967 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2968 &rdata->rx_desc_paddr); 2969 if (rdata->rx_desc == NULL) { 2970 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2971 return ENOMEM; 2972 } 2973 2974 rsize = __VM_CACHELINE_ALIGN( 2975 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2976 rdata->rx_buf = kmalloc(rsize, M_DEVBUF, 2977 M_WAITOK | M_ZERO | M_CACHEALIGN); 2978 2979 /* 2980 * Create DMA tag for rx buffers 2981 */ 2982 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2983 1, 0, /* alignment, bounds */ 2984 BUS_SPACE_MAXADDR, /* lowaddr */ 2985 BUS_SPACE_MAXADDR, /* highaddr */ 2986 NULL, NULL, /* filter, filterarg */ 2987 MCLBYTES, /* maxsize */ 2988 1, /* nsegments */ 2989 MCLBYTES, /* maxsegsize */ 2990 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2991 &rdata->rxtag); 2992 if (error) { 2993 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2994 kfree(rdata->rx_buf, M_DEVBUF); 2995 rdata->rx_buf = NULL; 2996 return error; 2997 } 2998 2999 /* 3000 * Create spare DMA map for rx buffers 3001 */ 3002 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 3003 &rdata->rx_sparemap); 3004 if (error) { 3005 device_printf(dev, "Unable to create spare RX DMA map\n"); 3006 bus_dma_tag_destroy(rdata->rxtag); 3007 kfree(rdata->rx_buf, M_DEVBUF); 3008 rdata->rx_buf = NULL; 3009 return error; 3010 } 3011 3012 /* 3013 * Create DMA maps for rx buffers 3014 */ 3015 for (i = 0; i < rdata->num_rx_desc; i++) { 3016 rx_buffer = &rdata->rx_buf[i]; 3017 3018 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 3019 &rx_buffer->map); 3020 if (error) { 3021 device_printf(dev, "Unable to create RX DMA map\n"); 3022 emx_destroy_rx_ring(rdata, i); 3023 return error; 3024 } 3025 } 3026 return (0); 3027 } 3028 3029 static void 3030 emx_free_rx_ring(struct emx_rxdata *rdata) 3031 { 3032 int i; 3033 3034 for (i = 0; i < rdata->num_rx_desc; i++) { 3035 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 3036 3037 if (rx_buffer->m_head != NULL) { 3038 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 3039 m_freem(rx_buffer->m_head); 3040 rx_buffer->m_head = NULL; 3041 } 3042 } 3043 3044 if (rdata->fmp != NULL) 3045 m_freem(rdata->fmp); 3046 rdata->fmp = NULL; 3047 rdata->lmp = NULL; 3048 } 3049 3050 static void 3051 emx_free_tx_ring(struct emx_txdata *tdata) 3052 { 3053 int i; 3054 3055 for (i = 0; i < tdata->num_tx_desc; i++) { 3056 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i]; 3057 3058 if (tx_buffer->m_head != NULL) 3059 emx_free_txbuf(tdata, tx_buffer); 3060 } 3061 3062 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX; 3063 3064 tdata->csum_flags = 0; 3065 tdata->csum_lhlen = 0; 3066 tdata->csum_iphlen = 0; 3067 tdata->csum_thlen = 0; 3068 tdata->csum_mss = 0; 3069 tdata->csum_pktlen = 0; 3070 3071 tdata->tx_dd_head = 0; 3072 tdata->tx_dd_tail = 0; 3073 tdata->tx_nsegs = 0; 3074 } 3075 3076 static int 3077 emx_init_rx_ring(struct emx_rxdata *rdata) 3078 { 3079 int i, 
error;
3080
3081 /* Reset descriptor ring */
3082 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);
3083
3084 /* Allocate new ones. */
3085 for (i = 0; i < rdata->num_rx_desc; i++) {
3086 error = emx_newbuf(rdata, i, 1);
3087 if (error)
3088 return (error);
3089 }
3090
3091 /* Setup our descriptor pointers */
3092 rdata->next_rx_desc_to_check = 0;
3093
3094 return (0);
3095 }
3096
3097 static void
3098 emx_init_rx_unit(struct emx_softc *sc)
3099 {
3100 struct ifnet *ifp = &sc->arpcom.ac_if;
3101 uint64_t bus_addr;
3102 uint32_t rctl, itr, rfctl, rxcsum;
3103 int i;
3104
3105 /*
3106 * Make sure receives are disabled while setting
3107 * up the descriptor ring
3108 */
3109 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
3110 /* Do not disable if ever enabled on this hardware */
3111 if (sc->hw.mac.type != e1000_82574)
3112 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3113
3114 /*
3115 * Set the interrupt throttling rate. Value is calculated
3116 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3117 */
3118 if (sc->int_throttle_ceil)
3119 itr = 1000000000 / 256 / sc->int_throttle_ceil;
3120 else
3121 itr = 0;
3122 emx_set_itr(sc, itr);
3123
3124 /* Use extended RX descriptor */
3125 rfctl = E1000_READ_REG(&sc->hw, E1000_RFCTL);
3126 rfctl |= E1000_RFCTL_EXTEN;
3127 /* Disable accelerated acknowledgement */
3128 if (sc->hw.mac.type == e1000_82574)
3129 rfctl |= E1000_RFCTL_ACK_DIS;
3130 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);
3131
3132 /*
3133 * Receive Checksum Offload for TCP and UDP
3134 *
3135 * Checksum offloading is also enabled if multiple receive
3136 * queues are to be supported, since we need it to figure out
3137 * the packet type.
3138 */
3139 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
3140 if ((ifp->if_capenable & IFCAP_RXCSUM) ||
3141 sc->rx_ring_cnt > 1) {
3142 /*
3143 * NOTE:
3144 * PCSD must be enabled to enable multiple
3145 * receive queues.
3146 */
3147 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
3148 E1000_RXCSUM_PCSD;
3149 } else {
3150 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
3151 E1000_RXCSUM_PCSD);
3152 }
3153 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
3154
3155 /*
3156 * Configure multiple receive queue (RSS)
3157 */
3158 if (sc->rx_ring_cnt > 1) {
3159 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
3160 int r, j;
3161
3162 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
3163 ("invalid number of RX ring (%d)", sc->rx_ring_cnt));
3164
3165 /*
3166 * NOTE:
3167 * When we reach here, RSS has already been disabled
3168 * in emx_stop(), so we can safely configure the RSS key
3169 * and redirect table.
3170 */
3171
3172 /*
3173 * Configure RSS key
3174 */
3175 toeplitz_get_key(key, sizeof(key));
3176 for (i = 0; i < EMX_NRSSRK; ++i) {
3177 uint32_t rssrk;
3178
3179 rssrk = EMX_RSSRK_VAL(key, i);
3180 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
3181
3182 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
3183 }
3184
3185 /*
3186 * Configure RSS redirect table.
3187 */
3188 if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table,
3189 EMX_RDRTABLE_SIZE);
3190
3191 r = 0;
3192 for (j = 0; j < EMX_NRETA; ++j) {
3193 uint32_t reta = 0;
3194
3195 for (i = 0; i < EMX_RETA_SIZE; ++i) {
3196 uint32_t q;
3197
3198 q = sc->rdr_table[r] << EMX_RETA_RINGIDX_SHIFT;
3199 reta |= q << (8 * i);
3200 ++r;
3201 }
3202 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
3203 E1000_WRITE_REG(&sc->hw, E1000_RETA(j), reta);
3204 }
3205
3206 /*
3207 * Enable multiple receive queues.
3208 * Enable IPv4 RSS standard hash functions.
3209 * Disable RSS interrupt.
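 * With only the two IPv4 hash fields set below, TCP spreads
 * on the full 4-tuple while other IPv4 traffic, e.g. UDP,
 * hashes on the address pair alone; non-IPv4 traffic is
 * presumably left on ring 0.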
3210 */
3211 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
3212 E1000_MRQC_ENABLE_RSS_2Q |
3213 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3214 E1000_MRQC_RSS_FIELD_IPV4);
3215 }
3216
3217 /*
3218 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3219 * long latencies are observed, like Lenovo X60. This
3220 * change eliminates the problem, but since having positive
3221 * values in RDTR is a known source of problems on other
3222 * platforms another solution is being sought.
3223 */
3224 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
3225 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
3226 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
3227 }
3228
3229 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3230 struct emx_rxdata *rdata = &sc->rx_data[i];
3231
3232 /*
3233 * Setup the Base and Length of the Rx Descriptor Ring
3234 */
3235 bus_addr = rdata->rx_desc_paddr;
3236 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
3237 rdata->num_rx_desc * sizeof(emx_rxdesc_t));
3238 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
3239 (uint32_t)(bus_addr >> 32));
3240 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
3241 (uint32_t)bus_addr);
3242
3243 /*
3244 * Setup the HW Rx Head and Tail Descriptor Pointers
3245 */
3246 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
3247 E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
3248 sc->rx_data[i].num_rx_desc - 1);
3249 }
3250
3251 /* Set PTHRESH for improved jumbo performance */
3252 if (ifp->if_mtu > ETHERMTU && sc->hw.mac.type == e1000_82574) {
3253 uint32_t rxdctl;
3254
3255 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3256 rxdctl = E1000_READ_REG(&sc->hw, E1000_RXDCTL(i));
3257 rxdctl |= 0x20; /* PTHRESH */
3258 rxdctl |= 4 << 8; /* HTHRESH */
3259 rxdctl |= 4 << 16; /* WTHRESH */
3260 rxdctl |= 1 << 24; /* Switch to granularity */
3261 E1000_WRITE_REG(&sc->hw, E1000_RXDCTL(i), rxdctl);
3262 }
3263 }
3264
3265 if (sc->hw.mac.type >= e1000_pch2lan) {
3266 if (ifp->if_mtu > ETHERMTU)
3267 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE);
3268 else
3269 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE);
3270 }
3271
3272 /* Setup the Receive Control Register */
3273 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3274 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3275 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
3276 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3277
3278 /* Make sure VLAN Filters are off */
3279 rctl &= ~E1000_RCTL_VFE;
3280
3281 /* Don't store bad packets */
3282 rctl &= ~E1000_RCTL_SBP;
3283
3284 /* MCLBYTES */
3285 rctl |= E1000_RCTL_SZ_2048;
3286
3287 if (ifp->if_mtu > ETHERMTU)
3288 rctl |= E1000_RCTL_LPE;
3289 else
3290 rctl &= ~E1000_RCTL_LPE;
3291
3292 /* Enable Receives */
3293 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
3294 }
3295
3296 static void
3297 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc)
3298 {
3299 struct emx_rxbuf *rx_buffer;
3300 int i;
3301
3302 /* Free Receive Descriptor ring */
3303 if (rdata->rx_desc) {
3304 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
3305 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
3306 rdata->rx_desc_dmap);
3307 bus_dma_tag_destroy(rdata->rx_desc_dtag);
3308
3309 rdata->rx_desc = NULL;
3310 }
3311
3312 if (rdata->rx_buf == NULL)
3313 return;
3314
3315 for (i = 0; i < ndesc; i++) {
3316 rx_buffer = &rdata->rx_buf[i];
3317
3318 KKASSERT(rx_buffer->m_head == NULL);
3319 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
3320 }
3321 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
3322 bus_dma_tag_destroy(rdata->rxtag);
3323
3324 kfree(rdata->rx_buf, M_DEVBUF);
3325 rdata->rx_buf =
NULL;
3326 }
3327
3328 static void
3329 emx_rxeof(struct emx_rxdata *rdata, int count)
3330 {
3331 struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
3332 uint32_t staterr;
3333 emx_rxdesc_t *current_desc;
3334 struct mbuf *mp;
3335 int i, cpuid = mycpuid;
3336
3337 i = rdata->next_rx_desc_to_check;
3338 current_desc = &rdata->rx_desc[i];
3339 staterr = le32toh(current_desc->rxd_staterr);
3340
3341 if (!(staterr & E1000_RXD_STAT_DD))
3342 return;
3343
3344 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
3345 struct pktinfo *pi = NULL, pi0;
3346 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
3347 struct mbuf *m = NULL;
3348 int eop, len;
3349
3350 logif(pkt_receive);
3351
3352 mp = rx_buf->m_head;
3353
3354 /*
3355 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3356 * needs to access the last received byte in the mbuf.
3357 */
3358 bus_dmamap_sync(rdata->rxtag, rx_buf->map,
3359 BUS_DMASYNC_POSTREAD);
3360
3361 len = le16toh(current_desc->rxd_length);
3362 if (staterr & E1000_RXD_STAT_EOP) {
3363 count--;
3364 eop = 1;
3365 } else {
3366 eop = 0;
3367 }
3368
3369 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3370 uint16_t vlan = 0;
3371 uint32_t mrq, rss_hash;
3372
3373 /*
3374 * Save some necessary information
3375 * before emx_newbuf() destroys it.
3376 */
3377 if ((staterr & E1000_RXD_STAT_VP) && eop)
3378 vlan = le16toh(current_desc->rxd_vlan);
3379
3380 mrq = le32toh(current_desc->rxd_mrq);
3381 rss_hash = le32toh(current_desc->rxd_rss);
3382
3383 EMX_RSS_DPRINTF(rdata->sc, 10,
3384 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
3385 rdata->idx, mrq, rss_hash);
3386
3387 if (emx_newbuf(rdata, i, 0) != 0) {
3388 IFNET_STAT_INC(ifp, iqdrops, 1);
3389 goto discard;
3390 }
3391
3392 /* Assign correct length to the current fragment */
3393 mp->m_len = len;
3394
3395 if (rdata->fmp == NULL) {
3396 mp->m_pkthdr.len = len;
3397 rdata->fmp = mp; /* Store the first mbuf */
3398 rdata->lmp = mp;
3399 } else {
3400 /*
3401 * Chain mbufs together
3402 */
3403 rdata->lmp->m_next = mp;
3404 rdata->lmp = rdata->lmp->m_next;
3405 rdata->fmp->m_pkthdr.len += len;
3406 }
3407
3408 if (eop) {
3409 rdata->fmp->m_pkthdr.rcvif = ifp;
3410 IFNET_STAT_INC(ifp, ipackets, 1);
3411
3412 if (ifp->if_capenable & IFCAP_RXCSUM)
3413 emx_rxcsum(staterr, rdata->fmp);
3414
3415 if (staterr & E1000_RXD_STAT_VP) {
3416 rdata->fmp->m_pkthdr.ether_vlantag =
3417 vlan;
3418 rdata->fmp->m_flags |= M_VLANTAG;
3419 }
3420 m = rdata->fmp;
3421 rdata->fmp = NULL;
3422 rdata->lmp = NULL;
3423
3424 if (ifp->if_capenable & IFCAP_RSS) {
3425 pi = emx_rssinfo(m, &pi0, mrq,
3426 rss_hash, staterr);
3427 }
3428 #ifdef EMX_RSS_DEBUG
3429 rdata->rx_pkts++;
3430 #endif
3431 }
3432 } else {
3433 IFNET_STAT_INC(ifp, ierrors, 1);
3434 discard:
3435 emx_setup_rxdesc(current_desc, rx_buf);
3436 if (rdata->fmp != NULL) {
3437 m_freem(rdata->fmp);
3438 rdata->fmp = NULL;
3439 rdata->lmp = NULL;
3440 }
3441 m = NULL;
3442 }
3443
3444 if (m != NULL)
3445 ifp->if_input(ifp, m, pi, cpuid);
3446
3447 /* Advance our pointers to the next descriptor. */
3448 if (++i == rdata->num_rx_desc)
3449 i = 0;
3450
3451 current_desc = &rdata->rx_desc[i];
3452 staterr = le32toh(current_desc->rxd_staterr);
3453 }
3454 rdata->next_rx_desc_to_check = i;
3455
3456 /* Advance the E1000's Receive Queue "Tail Pointer".
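 * RDT is deliberately left one descriptor behind the position
 * checked next, so the hardware can never wrap around and
 * overwrite a descriptor the driver has not processed yet.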
*/ 3457 if (--i < 0) 3458 i = rdata->num_rx_desc - 1; 3459 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3460 } 3461 3462 static void 3463 emx_enable_intr(struct emx_softc *sc) 3464 { 3465 uint32_t ims_mask = IMS_ENABLE_MASK; 3466 3467 lwkt_serialize_handler_enable(&sc->main_serialize); 3468 3469 #if 0 3470 if (sc->hw.mac.type == e1000_82574) { 3471 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3472 ims_mask |= EM_MSIX_MASK; 3473 } 3474 #endif 3475 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3476 } 3477 3478 static void 3479 emx_disable_intr(struct emx_softc *sc) 3480 { 3481 if (sc->hw.mac.type == e1000_82574) 3482 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3483 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3484 3485 lwkt_serialize_handler_disable(&sc->main_serialize); 3486 } 3487 3488 /* 3489 * Bit of a misnomer, what this really means is 3490 * to enable OS management of the system... aka 3491 * to disable special hardware management features 3492 */ 3493 static void 3494 emx_get_mgmt(struct emx_softc *sc) 3495 { 3496 /* A shared code workaround */ 3497 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3498 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3499 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3500 3501 /* disable hardware interception of ARP */ 3502 manc &= ~(E1000_MANC_ARP_EN); 3503 3504 /* enable receiving management packets to the host */ 3505 manc |= E1000_MANC_EN_MNG2HOST; 3506 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3507 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3508 manc2h |= E1000_MNG2HOST_PORT_623; 3509 manc2h |= E1000_MNG2HOST_PORT_664; 3510 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3511 3512 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3513 } 3514 } 3515 3516 /* 3517 * Give control back to hardware management 3518 * controller if there is one. 3519 */ 3520 static void 3521 emx_rel_mgmt(struct emx_softc *sc) 3522 { 3523 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3524 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3525 3526 /* re-enable hardware interception of ARP */ 3527 manc |= E1000_MANC_ARP_EN; 3528 manc &= ~E1000_MANC_EN_MNG2HOST; 3529 3530 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3531 } 3532 } 3533 3534 /* 3535 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3536 * For ASF and Pass Through versions of f/w this means that 3537 * the driver is loaded. For AMT version (only with 82573) 3538 * of the f/w this means that the network i/f is open. 3539 */ 3540 static void 3541 emx_get_hw_control(struct emx_softc *sc) 3542 { 3543 /* Let firmware know the driver has taken over */ 3544 if (sc->hw.mac.type == e1000_82573) { 3545 uint32_t swsm; 3546 3547 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3548 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3549 swsm | E1000_SWSM_DRV_LOAD); 3550 } else { 3551 uint32_t ctrl_ext; 3552 3553 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3554 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3555 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3556 } 3557 sc->flags |= EMX_FLAG_HW_CTRL; 3558 } 3559 3560 /* 3561 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3562 * For ASF and Pass Through versions of f/w this means that the 3563 * driver is no longer loaded. For AMT version (only with 82573) 3564 * of the f/w this means that the network i/f is closed. 
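 * This is the counterpart of emx_get_hw_control() above; note
 * that the 82573 performs the handshake through SWSM while all
 * other chips use CTRL_EXT, mirroring the code there.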
3565 */
3566 static void
3567 emx_rel_hw_control(struct emx_softc *sc)
3568 {
3569 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
3570 return;
3571 sc->flags &= ~EMX_FLAG_HW_CTRL;
3572
3573 /* Let firmware take over control of h/w */
3574 if (sc->hw.mac.type == e1000_82573) {
3575 uint32_t swsm;
3576
3577 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3578 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3579 swsm & ~E1000_SWSM_DRV_LOAD);
3580 } else {
3581 uint32_t ctrl_ext;
3582
3583 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3584 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3585 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3586 }
3587 }
3588
3589 static int
3590 emx_is_valid_eaddr(const uint8_t *addr)
3591 {
3592 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3593
3594 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3595 return (FALSE);
3596
3597 return (TRUE);
3598 }
3599
3600 /*
3601 * Enable PCI Wake On Lan capability
3602 */
3603 static void
3604 emx_enable_wol(device_t dev)
3605 {
3606 uint16_t cap, status;
3607 uint8_t id;
3608
3609 /* First find the capabilities pointer */
3610 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3611
3612 /* Read the PM Capabilities */
3613 id = pci_read_config(dev, cap, 1);
3614 if (id != PCIY_PMG) /* Something wrong */
3615 return;
3616
3617 /*
3618 * OK, we have the power capabilities,
3619 * so now get the status register
3620 */
3621 cap += PCIR_POWER_STATUS;
3622 status = pci_read_config(dev, cap, 2);
3623 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3624 pci_write_config(dev, cap, status, 2);
3625 }
3626
3627 static void
3628 emx_update_stats(struct emx_softc *sc)
3629 {
3630 struct ifnet *ifp = &sc->arpcom.ac_if;
3631
3632 if (sc->hw.phy.media_type == e1000_media_type_copper ||
3633 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3634 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
3635 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
3636 }
3637 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
3638 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
3639 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
3640 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
3641
3642 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
3643 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
3644 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
3645 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
3646 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
3647 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
3648 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
3649 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
3650 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
3651 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
3652 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
3653 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
3654 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
3655 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
3656 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
3657 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
3658 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
3659 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
3660 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
3661 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
3662
3663 /* For the 64-bit byte counters the low dword must be read first.
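 * A sketch of the canonical access pattern would be:
 *
 *	lo = E1000_READ_REG(&sc->hw, E1000_GORCL);
 *	sc->stats.gorc +=
 *	    ((uint64_t)E1000_READ_REG(&sc->hw, E1000_GORCH) << 32) | lo;
 *
 * though the code below only accumulates the high dwords.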
*/ 3664 /* Both registers clear on the read of the high dword */ 3665 3666 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3667 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3668 3669 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3670 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3671 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3672 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3673 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3674 3675 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3676 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3677 3678 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3679 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3680 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3681 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3682 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3683 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3684 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3685 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3686 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3687 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3688 3689 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3690 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3691 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3692 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3693 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3694 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3695 3696 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3697 3698 /* Rx Errors */ 3699 IFNET_STAT_SET(ifp, ierrors, 3700 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3701 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3702 3703 /* Tx Errors */ 3704 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3705 } 3706 3707 static void 3708 emx_print_debug_info(struct emx_softc *sc) 3709 { 3710 device_t dev = sc->dev; 3711 uint8_t *hw_addr = sc->hw.hw_addr; 3712 int i; 3713 3714 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3715 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3716 E1000_READ_REG(&sc->hw, E1000_CTRL), 3717 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3718 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3719 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3720 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3721 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3722 sc->hw.fc.high_water, sc->hw.fc.low_water); 3723 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3724 E1000_READ_REG(&sc->hw, E1000_TIDV), 3725 E1000_READ_REG(&sc->hw, E1000_TADV)); 3726 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3727 E1000_READ_REG(&sc->hw, E1000_RDTR), 3728 E1000_READ_REG(&sc->hw, E1000_RADV)); 3729 3730 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3731 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3732 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3733 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3734 } 3735 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3736 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3737 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3738 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3739 } 3740 3741 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3742 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3743 sc->tx_data[i].num_tx_desc_avail); 3744 
device_printf(dev, "TX %d TSO segments = %lu\n", i,
3745 sc->tx_data[i].tso_segments);
3746 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i,
3747 sc->tx_data[i].tso_ctx_reused);
3748 }
3749 }
3750
3751 static void
3752 emx_print_hw_stats(struct emx_softc *sc)
3753 {
3754 device_t dev = sc->dev;
3755
3756 device_printf(dev, "Excessive collisions = %lld\n",
3757 (long long)sc->stats.ecol);
3758 #if (DEBUG_HW > 0) /* Don't output these errors normally */
3759 device_printf(dev, "Symbol errors = %lld\n",
3760 (long long)sc->stats.symerrs);
3761 #endif
3762 device_printf(dev, "Sequence errors = %lld\n",
3763 (long long)sc->stats.sec);
3764 device_printf(dev, "Defer count = %lld\n",
3765 (long long)sc->stats.dc);
3766 device_printf(dev, "Missed Packets = %lld\n",
3767 (long long)sc->stats.mpc);
3768 device_printf(dev, "Receive No Buffers = %lld\n",
3769 (long long)sc->stats.rnbc);
3770 /* RLEC is inaccurate on some hardware, calculate our own. */
3771 device_printf(dev, "Receive Length Errors = %lld\n",
3772 ((long long)sc->stats.roc + (long long)sc->stats.ruc));
3773 device_printf(dev, "Receive errors = %lld\n",
3774 (long long)sc->stats.rxerrc);
3775 device_printf(dev, "Crc errors = %lld\n",
3776 (long long)sc->stats.crcerrs);
3777 device_printf(dev, "Alignment errors = %lld\n",
3778 (long long)sc->stats.algnerrc);
3779 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
3780 (long long)sc->stats.cexterr);
3781 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns);
3782 device_printf(dev, "XON Rcvd = %lld\n",
3783 (long long)sc->stats.xonrxc);
3784 device_printf(dev, "XON Xmtd = %lld\n",
3785 (long long)sc->stats.xontxc);
3786 device_printf(dev, "XOFF Rcvd = %lld\n",
3787 (long long)sc->stats.xoffrxc);
3788 device_printf(dev, "XOFF Xmtd = %lld\n",
3789 (long long)sc->stats.xofftxc);
3790 device_printf(dev, "Good Packets Rcvd = %lld\n",
3791 (long long)sc->stats.gprc);
3792 device_printf(dev, "Good Packets Xmtd = %lld\n",
3793 (long long)sc->stats.gptc);
3794 }
3795
3796 static void
3797 emx_print_nvm_info(struct emx_softc *sc)
3798 {
3799 uint16_t eeprom_data;
3800 int i, j, row = 0;
3801
3802 /* It's a bit crude, but it gets the job done */
3803 kprintf("\nInterface EEPROM Dump:\n");
3804 kprintf("Offset\n0x0000 ");
3805 for (i = 0, j = 0; i < 32; i++, j++) {
3806 if (j == 8) { /* Make the offset block */
3807 j = 0; ++row;
3808 kprintf("\n0x00%x0 ", row);
3809 }
3810 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data);
3811 kprintf("%04x ", eeprom_data);
3812 }
3813 kprintf("\n");
3814 }
3815
3816 static int
3817 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3818 {
3819 struct emx_softc *sc;
3820 struct ifnet *ifp;
3821 int error, result;
3822
3823 result = -1;
3824 error = sysctl_handle_int(oidp, &result, 0, req);
3825 if (error || !req->newptr)
3826 return (error);
3827
3828 sc = (struct emx_softc *)arg1;
3829 ifp = &sc->arpcom.ac_if;
3830
3831 ifnet_serialize_all(ifp);
3832
3833 if (result == 1)
3834 emx_print_debug_info(sc);
3835
3836 /*
3837 * This value will cause a hex dump of the
3838 * first 32 16-bit words of the EEPROM to
3839 * the screen.
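 * Usage sketch (assuming device unit 0):
 *
 *	sysctl dev.emx.0.debug=2
 *
 * while a value of 1 prints the adapter debug information above.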
	 */
	if (result == 2)
		emx_print_nvm_info(sc);

	ifnet_deserialize_all(ifp);

	return (error);
}

static int
emx_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		struct emx_softc *sc = (struct emx_softc *)arg1;
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		emx_print_hw_stats(sc);
		ifnet_deserialize_all(ifp);
	}
	return (error);
}

static void
emx_add_sysctl(struct emx_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char pkt_desc[32];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0,
	    "# of TX descs");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_int_throttle, "I", "interrupt throttling rate");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_wreg_nsegs, "I",
	    "# segments sent before write to hardware register");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0,
	    "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0,
	    "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "TX polling CPU map");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "RX polling CPU map");
#endif

#ifdef EMX_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug,
	    0, "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts,
		    "RXed packets");
	}
#endif
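	/*
	 * Per-TX-ring debug nodes are added below; with the usual
	 * dev.<driver>.<unit> tree and two TX rings they show up as,
	 * e.g., dev.emx.0.tx0_nmbuf, dev.emx.0.tx1_nmbuf,
	 * dev.emx.0.tx0_gc and dev.emx.0.tx1_gc (unit 0 assumed).
	 */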
"TXed packets"); 3944 #endif 3945 3946 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_nmbuf", i); 3947 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3948 pkt_desc, CTLFLAG_RD, &sc->tx_data[i].tx_nmbuf, 0, 3949 "# of pending TX mbufs"); 3950 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_gc", i); 3951 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3952 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_gc, 3953 "# of TX desc GC"); 3954 } 3955 } 3956 3957 static int 3958 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3959 { 3960 struct emx_softc *sc = (void *)arg1; 3961 struct ifnet *ifp = &sc->arpcom.ac_if; 3962 int error, throttle; 3963 3964 throttle = sc->int_throttle_ceil; 3965 error = sysctl_handle_int(oidp, &throttle, 0, req); 3966 if (error || req->newptr == NULL) 3967 return error; 3968 if (throttle < 0 || throttle > 1000000000 / 256) 3969 return EINVAL; 3970 3971 if (throttle) { 3972 /* 3973 * Set the interrupt throttling rate in 256ns increments, 3974 * recalculate sysctl value assignment to get exact frequency. 3975 */ 3976 throttle = 1000000000 / 256 / throttle; 3977 3978 /* Upper 16bits of ITR is reserved and should be zero */ 3979 if (throttle & 0xffff0000) 3980 return EINVAL; 3981 } 3982 3983 ifnet_serialize_all(ifp); 3984 3985 if (throttle) 3986 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3987 else 3988 sc->int_throttle_ceil = 0; 3989 3990 if (ifp->if_flags & IFF_RUNNING) 3991 emx_set_itr(sc, throttle); 3992 3993 ifnet_deserialize_all(ifp); 3994 3995 if (bootverbose) { 3996 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3997 sc->int_throttle_ceil); 3998 } 3999 return 0; 4000 } 4001 4002 static int 4003 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 4004 { 4005 struct emx_softc *sc = (void *)arg1; 4006 struct ifnet *ifp = &sc->arpcom.ac_if; 4007 struct emx_txdata *tdata = &sc->tx_data[0]; 4008 int error, segs; 4009 4010 segs = tdata->tx_intr_nsegs; 4011 error = sysctl_handle_int(oidp, &segs, 0, req); 4012 if (error || req->newptr == NULL) 4013 return error; 4014 if (segs <= 0) 4015 return EINVAL; 4016 4017 ifnet_serialize_all(ifp); 4018 4019 /* 4020 * Don't allow tx_intr_nsegs to become: 4021 * o Less the oact_tx_desc 4022 * o Too large that no TX desc will cause TX interrupt to 4023 * be generated (OACTIVE will never recover) 4024 * o Too small that will cause tx_dd[] overflow 4025 */ 4026 if (segs < tdata->oact_tx_desc || 4027 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 4028 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 4029 error = EINVAL; 4030 } else { 4031 int i; 4032 4033 error = 0; 4034 for (i = 0; i < sc->tx_ring_cnt; ++i) 4035 sc->tx_data[i].tx_intr_nsegs = segs; 4036 } 4037 4038 ifnet_deserialize_all(ifp); 4039 4040 return error; 4041 } 4042 4043 static int 4044 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 4045 { 4046 struct emx_softc *sc = (void *)arg1; 4047 struct ifnet *ifp = &sc->arpcom.ac_if; 4048 int error, nsegs, i; 4049 4050 nsegs = sc->tx_data[0].tx_wreg_nsegs; 4051 error = sysctl_handle_int(oidp, &nsegs, 0, req); 4052 if (error || req->newptr == NULL) 4053 return error; 4054 4055 ifnet_serialize_all(ifp); 4056 for (i = 0; i < sc->tx_ring_cnt; ++i) 4057 sc->tx_data[i].tx_wreg_nsegs =nsegs; 4058 ifnet_deserialize_all(ifp); 4059 4060 return 0; 4061 } 4062 4063 static int 4064 emx_dma_alloc(struct emx_softc *sc) 4065 { 4066 int error, i; 4067 4068 /* 4069 * Create top level busdma tag 4070 */ 4071 error = bus_dma_tag_create(NULL, 1, 0, 4072 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4073 NULL, NULL, 4074 BUS_SPACE_MAXSIZE_32BIT, 0, 
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_dtag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate transmit descriptor rings and buffers
	 */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		error = emx_create_tx_ring(&sc->tx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup transmit structures\n");
			return error;
		}
	}

	/*
	 * Allocate receive descriptor rings and buffers
	 */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		error = emx_create_rx_ring(&sc->rx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup receive structures\n");
			return error;
		}
	}
	return 0;
}

static void
emx_dma_free(struct emx_softc *sc)
{
	int i;

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		emx_destroy_tx_ring(&sc->tx_data[i],
		    sc->tx_data[i].num_tx_desc);
	}

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		emx_destroy_rx_ring(&sc->rx_data[i],
		    sc->rx_data[i].num_rx_desc);
	}

	/* Free top level busdma tag */
	if (sc->parent_dtag != NULL)
		bus_dma_tag_destroy(sc->parent_dtag);
}

static void
emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
}

static int
emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_serialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
}

static void
emx_deserialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
}

#ifdef INVARIANTS

static void
emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
	    slz, serialized);
}

#endif	/* INVARIANTS */

#ifdef IFPOLL_ENABLE

static void
emx_npoll_status(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);
		callout_reset(&sc->timer, hz, emx_timer, sc);
	}
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct emx_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->tx_serialize);

	emx_tx_intr(tdata);
	emx_try_txgc(tdata, 1);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct emx_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->rx_serialize);

	emx_rxeof(rdata, cycle);
}
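
/*
 * Register (info != NULL) or deregister (info == NULL) polling
 * handlers.  Registration binds the status handler and each TX/RX
 * ring to the CPU chosen by the corresponding ringmap; deregistration
 * re-binds the TX queues to the interrupt CPU.
 */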
static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct emx_softc *sc = ifp->if_softc;
	int i, txr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int cpu;

		info->ifpi_status.status_func = emx_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = emx_get_txring_inuse(sc, TRUE);
		for (i = 0; i < txr_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_tx[cpu].poll_func = emx_npoll_tx;
			info->ifpi_tx[cpu].arg = tdata;
			info->ifpi_tx[cpu].serializer = &tdata->tx_serialize;
			ifsq_set_cpuid(tdata->ifsq, cpu);
		}

		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct emx_rxdata *rdata = &sc->rx_data[i];

			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_rx[cpu].poll_func = emx_npoll_rx;
			info->ifpi_rx[cpu].arg = rdata;
			info->ifpi_rx[cpu].serializer = &rdata->rx_serialize;
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			ifsq_set_cpuid(tdata->ifsq,
			    rman_get_cpuid(sc->intr_res));
		}
	}
	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);
}

#endif	/* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
	E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
	if (sc->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When using MSIX interrupts we need to
		 * throttle using the EITR register
		 */
		for (i = 0; i < 4; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
	}
}

/*
 * Disable L0s, 82574L errata #20
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = sc->dev;

	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8 disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
		/*
		 * 82574 specification update errata #20
		 *
		 * There is no need to disable L1
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}
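
/*
 * Make sure the link/IP/TCP headers (plus 4 extra bytes when
 * EMX_TXFLAG_TSO_PULLEX is set) are contiguous in the first mbuf,
 * so the TSO setup below can safely dereference them.
 */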
static int
emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
{
	int iphlen, hoff, thoff, ex = 0;
	struct mbuf *m;
	struct ip *ip;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
		ex = 4;

	if (m->m_len < hoff + iphlen + thoff + ex) {
		m = m_pullup(m, hoff + iphlen + thoff + ex);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	ip->ip_len = 0;

	return 0;
}

static int
emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int hoff, iphlen, thoff, hlen;
	int mss, pktlen, curr_txd;

#ifdef EMX_TSO_DEBUG
	tdata->tso_segments++;
#endif

	iphlen = mp->m_pkthdr.csum_iphlen;
	thoff = mp->m_pkthdr.csum_thlen;
	hoff = mp->m_pkthdr.csum_lhlen;
	mss = mp->m_pkthdr.tso_segsz;
	pktlen = mp->m_pkthdr.len;

	if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
	    tdata->csum_flags == CSUM_TSO &&
	    tdata->csum_iphlen == iphlen &&
	    tdata->csum_lhlen == hoff &&
	    tdata->csum_thlen == thoff &&
	    tdata->csum_mss == mss &&
	    tdata->csum_pktlen == pktlen) {
		*txd_upper = tdata->csum_txd_upper;
		*txd_lower = tdata->csum_txd_lower;
#ifdef EMX_TSO_DEBUG
		tdata->tso_ctx_reused++;
#endif
		return 0;
	}
	hlen = hoff + iphlen + thoff;
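	/*
	 * For a common TCP/IPv4 frame this works out to hoff 14
	 * (Ethernet header), iphlen 20 and thoff 20, i.e. hlen = 54
	 * bytes of headers in front of the TSO payload.
	 */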

	/*
	 * Setup a new TSO context.
	 */

	curr_txd = tdata->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
	    E1000_TXD_DTYP_D |			/* Data descr type */
	    E1000_TXD_CMD_TSE;			/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

	/*
	 * Start offset for header checksum calculation.
	 * End offset for header checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->lower_setup.ip_fields.ipcss = hoff;
	TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
	TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);

	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    hoff + iphlen + offsetof(struct tcphdr, th_sum);

	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mss);
	TXD->tcp_seg_setup.fields.hdr_len = hlen;
	TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
	    E1000_TXD_CMD_DEXT |	/* Extended descr */
	    E1000_TXD_CMD_TSE |		/* TSE context */
	    E1000_TXD_CMD_IP |		/* Do IP csum */
	    E1000_TXD_CMD_TCP |		/* Do TCP checksum */
	    (pktlen - hlen));		/* Total len */

	/* Save the information for this TSO context */
	tdata->csum_flags = CSUM_TSO;
	tdata->csum_lhlen = hoff;
	tdata->csum_iphlen = iphlen;
	tdata->csum_thlen = thoff;
	tdata->csum_mss = mss;
	tdata->csum_pktlen = pktlen;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}

static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
	if (polling)
		return sc->tx_ring_cnt;
	else
		return 1;
}

/*
 * Remove all descriptors from the TX ring.
 *
 * We want to clear all pending descriptors from the TX ring; zeroing
 * happens when the HW reads the regs.  We assign the ring itself as
 * the data of the next descriptor.  We don't care about the data,
 * since we are about to reset the HW anyway.
 */
static void
emx_flush_tx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);

	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];
		struct e1000_tx_desc *txd;

		if (E1000_READ_REG(hw, E1000_TDLEN(i)) == 0)
			continue;

		txd = &tdata->tx_desc_base[tdata->next_avail_tx_desc++];
		if (tdata->next_avail_tx_desc == tdata->num_tx_desc)
			tdata->next_avail_tx_desc = 0;

		/* Just use the ring as a dummy buffer addr */
		txd->buffer_addr = tdata->tx_desc_paddr;
		txd->lower.data = htole32(E1000_TXD_CMD_IFCS | 512);
		txd->upper.data = 0;

		E1000_WRITE_REG(hw, E1000_TDT(i), tdata->next_avail_tx_desc);
		usec_delay(250);
	}
}

/*
 * Remove all descriptors from the RX rings.
 *
 * Mark all descriptors in the RX rings as consumed and disable the
 * RX rings.
 */
static void
emx_flush_rx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t rctl;
	int i;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	usec_delay(150);

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		uint32_t rxdctl;

		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		/* Zero the lower 14 bits (prefetch and host thresholds) */
		rxdctl &= 0xffffc000;
		/*
		 * Update thresholds: prefetch threshold to 31, host
		 * threshold to 1 and make sure the granularity is
		 * "descriptors" and not "cache lines".
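		 * Concretely: 0x1F is PTHRESH (bits 5:0) = 31,
		 * (1 << 8) is HTHRESH (bits 13:8) = 1, and
		 * E1000_RXDCTL_THRESH_UNIT_DESC selects descriptor
		 * granularity.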
		 */
		rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	/* Momentarily enable the RX rings for the changes to take effect */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	usec_delay(150);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
}

/*
 * Remove all descriptors from the descriptor rings.
 *
 * On I219, the descriptor rings must be emptied before resetting the
 * HW or before changing the device state to D3 during runtime
 * (runtime PM).
 *
 * Failure to do this will cause the HW to enter a unit hang state
 * which can only be released by a PCI reset of the device.
 */
static void
emx_flush_txrx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	uint16_t hang_state;
	uint32_t fext_nvm11, tdlen;
	int i;

	/*
	 * First, disable the MULR fix in FEXTNVM11.
	 */
	fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
	fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
	E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);

	/*
	 * Do nothing if we're not in a faulty state, or if the queue
	 * is empty.
	 */
	tdlen = 0;
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		tdlen += E1000_READ_REG(hw, E1000_TDLEN(i));
	hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
	if ((hang_state & EMX_FLUSH_DESC_REQUIRED) && tdlen)
		emx_flush_tx_ring(sc);

	/*
	 * Recheck; maybe the fault is caused by the RX ring.
	 */
	hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
	if (hang_state & EMX_FLUSH_DESC_REQUIRED)
		emx_flush_rx_ring(sc);
}
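
/*
 * Usage sketch (hypothetical caller; the actual reset path is outside
 * this excerpt, and the guard on e1000_pch_spt is an assumption, not
 * taken from this file).  On I219 the flush must happen before the
 * MAC is reset, roughly:
 *
 *	if (sc->hw.mac.type == e1000_pch_spt)
 *		emx_flush_txrx_ring(sc);
 *	e1000_reset_hw(&sc->hw);
 */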