1 /* 2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved. 3 * 4 * Copyright (c) 2001-2008, Intel Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * 34 * Copyright (c) 2005 The DragonFly Project. All rights reserved. 35 * 36 * This code is derived from software contributed to The DragonFly Project 37 * by Matthew Dillon <dillon@backplane.com> 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in 47 * the documentation and/or other materials provided with the 48 * distribution. 49 * 3. Neither the name of The DragonFly Project nor the names of its 50 * contributors may be used to endorse or promote products derived 51 * from this software without specific, prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 */ 66 67 #include "opt_ifpoll.h" 68 #include "opt_emx.h" 69 70 #include <sys/param.h> 71 #include <sys/bus.h> 72 #include <sys/endian.h> 73 #include <sys/interrupt.h> 74 #include <sys/kernel.h> 75 #include <sys/ktr.h> 76 #include <sys/malloc.h> 77 #include <sys/mbuf.h> 78 #include <sys/proc.h> 79 #include <sys/rman.h> 80 #include <sys/serialize.h> 81 #include <sys/serialize2.h> 82 #include <sys/socket.h> 83 #include <sys/sockio.h> 84 #include <sys/sysctl.h> 85 #include <sys/systm.h> 86 87 #include <net/bpf.h> 88 #include <net/ethernet.h> 89 #include <net/if.h> 90 #include <net/if_arp.h> 91 #include <net/if_dl.h> 92 #include <net/if_media.h> 93 #include <net/ifq_var.h> 94 #include <net/if_ringmap.h> 95 #include <net/toeplitz.h> 96 #include <net/toeplitz2.h> 97 #include <net/vlan/if_vlan_var.h> 98 #include <net/vlan/if_vlan_ether.h> 99 #include <net/if_poll.h> 100 101 #include <netinet/in_systm.h> 102 #include <netinet/in.h> 103 #include <netinet/ip.h> 104 #include <netinet/tcp.h> 105 #include <netinet/udp.h> 106 107 #include <bus/pci/pcivar.h> 108 #include <bus/pci/pcireg.h> 109 110 #include <dev/netif/ig_hal/e1000_api.h> 111 #include <dev/netif/ig_hal/e1000_82571.h> 112 #include <dev/netif/ig_hal/e1000_dragonfly.h> 113 #include <dev/netif/emx/if_emx.h> 114 115 #define DEBUG_HW 0 116 117 #ifdef EMX_RSS_DEBUG 118 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \ 119 do { \ 120 if (sc->rss_debug >= lvl) \ 121 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 122 } while (0) 123 #else /* !EMX_RSS_DEBUG */ 124 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 125 #endif /* EMX_RSS_DEBUG */ 126 127 #define EMX_NAME "Intel(R) PRO/1000 " 128 129 #define EMX_DEVICE(id) \ 130 { EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id } 131 #define EMX_DEVICE_NULL { 0, 0, NULL } 132 133 static const struct emx_device { 134 uint16_t vid; 135 uint16_t did; 136 const char *desc; 137 } emx_devices[] = { 138 EMX_DEVICE(82571EB_COPPER), 139 EMX_DEVICE(82571EB_FIBER), 140 EMX_DEVICE(82571EB_SERDES), 141 EMX_DEVICE(82571EB_SERDES_DUAL), 142 EMX_DEVICE(82571EB_SERDES_QUAD), 143 EMX_DEVICE(82571EB_QUAD_COPPER), 144 EMX_DEVICE(82571EB_QUAD_COPPER_BP), 145 EMX_DEVICE(82571EB_QUAD_COPPER_LP), 146 EMX_DEVICE(82571EB_QUAD_FIBER), 147 EMX_DEVICE(82571PT_QUAD_COPPER), 148 149 EMX_DEVICE(82572EI_COPPER), 150 EMX_DEVICE(82572EI_FIBER), 151 EMX_DEVICE(82572EI_SERDES), 152 EMX_DEVICE(82572EI), 153 154 EMX_DEVICE(82573E), 155 EMX_DEVICE(82573E_IAMT), 156 EMX_DEVICE(82573L), 157 158 EMX_DEVICE(80003ES2LAN_COPPER_SPT), 159 EMX_DEVICE(80003ES2LAN_SERDES_SPT), 160 EMX_DEVICE(80003ES2LAN_COPPER_DPT), 161 EMX_DEVICE(80003ES2LAN_SERDES_DPT), 162 163 EMX_DEVICE(82574L), 164 EMX_DEVICE(82574LA), 165 166 EMX_DEVICE(PCH_LPT_I217_LM), 167 EMX_DEVICE(PCH_LPT_I217_V), 168 EMX_DEVICE(PCH_LPTLP_I218_LM), 169 EMX_DEVICE(PCH_LPTLP_I218_V), 170 EMX_DEVICE(PCH_I218_LM2), 171 EMX_DEVICE(PCH_I218_V2), 172 EMX_DEVICE(PCH_I218_LM3), 173 EMX_DEVICE(PCH_I218_V3), 174 EMX_DEVICE(PCH_SPT_I219_LM), 175 EMX_DEVICE(PCH_SPT_I219_V), 176 EMX_DEVICE(PCH_SPT_I219_LM2), 177 EMX_DEVICE(PCH_SPT_I219_V2), 178 EMX_DEVICE(PCH_LBG_I219_LM3), 179 EMX_DEVICE(PCH_SPT_I219_LM4), 180 EMX_DEVICE(PCH_SPT_I219_V4), 181 EMX_DEVICE(PCH_SPT_I219_LM5), 182 EMX_DEVICE(PCH_SPT_I219_V5), 183 184 /* required last entry */ 185 EMX_DEVICE_NULL 186 }; 187 188 static int emx_probe(device_t); 189 static int emx_attach(device_t); 190 static int emx_detach(device_t); 191 static int emx_shutdown(device_t); 192 static int emx_suspend(device_t); 193 static int emx_resume(device_t); 194 195 static void emx_init(void *); 196 static void emx_stop(struct emx_softc *); 197 static int emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 198 static void emx_start(struct ifnet *, struct ifaltq_subque *); 199 #ifdef IFPOLL_ENABLE 200 static void emx_npoll(struct ifnet *, struct ifpoll_info *); 201 static void emx_npoll_status(struct ifnet *); 202 static void emx_npoll_tx(struct ifnet *, void *, int); 203 static void emx_npoll_rx(struct ifnet *, void *, int); 204 #endif 205 static void emx_watchdog(struct ifaltq_subque *); 206 static void emx_media_status(struct ifnet *, struct ifmediareq *); 207 static int emx_media_change(struct ifnet *); 208 static void emx_timer(void *); 209 static void emx_serialize(struct ifnet *, enum ifnet_serialize); 210 static void emx_deserialize(struct ifnet *, enum ifnet_serialize); 211 static int emx_tryserialize(struct ifnet *, enum ifnet_serialize); 212 #ifdef INVARIANTS 213 static void emx_serialize_assert(struct ifnet *, enum ifnet_serialize, 214 boolean_t); 215 #endif 216 217 static void emx_intr(void *); 218 static void emx_intr_mask(void *); 219 static void emx_intr_body(struct emx_softc *, boolean_t); 220 static void emx_rxeof(struct emx_rxdata *, int); 221 static void emx_txeof(struct emx_txdata *); 222 static void emx_tx_collect(struct emx_txdata *, boolean_t); 223 static void emx_txgc_timer(void *); 224 static void emx_tx_purge(struct emx_softc *); 225 static void emx_enable_intr(struct emx_softc *); 226 static void emx_disable_intr(struct emx_softc *); 227 228 static int emx_dma_alloc(struct emx_softc *); 229 static void emx_dma_free(struct emx_softc *); 230 static void emx_init_tx_ring(struct emx_txdata *); 231 static int emx_init_rx_ring(struct emx_rxdata *); 232 static void emx_free_tx_ring(struct emx_txdata *); 233 static void emx_free_rx_ring(struct emx_rxdata *); 234 static int emx_create_tx_ring(struct emx_txdata *); 235 static int emx_create_rx_ring(struct emx_rxdata *); 236 static void emx_destroy_tx_ring(struct emx_txdata *, int); 237 static void emx_destroy_rx_ring(struct emx_rxdata *, int); 238 static int emx_newbuf(struct emx_rxdata *, int, int); 239 static int emx_encap(struct emx_txdata *, struct mbuf **, int *, int *); 240 static int emx_txcsum(struct emx_txdata *, struct mbuf *, 241 uint32_t *, uint32_t *); 242 static int emx_tso_pullup(struct emx_txdata *, struct mbuf **); 243 static int emx_tso_setup(struct emx_txdata *, struct mbuf *, 244 uint32_t *, uint32_t *); 245 static int emx_get_txring_inuse(const struct emx_softc *, boolean_t); 246 247 static int emx_is_valid_eaddr(const uint8_t *); 248 static int emx_reset(struct emx_softc *); 249 static void emx_setup_ifp(struct emx_softc *); 250 static void emx_init_tx_unit(struct emx_softc *); 251 static void emx_init_rx_unit(struct emx_softc *); 252 static void emx_update_stats(struct emx_softc *); 253 static void emx_set_promisc(struct emx_softc *); 254 static void emx_disable_promisc(struct emx_softc *); 255 static void emx_set_multi(struct emx_softc *); 256 static void emx_update_link_status(struct emx_softc *); 257 static void emx_smartspeed(struct emx_softc *); 258 static void emx_set_itr(struct emx_softc *, uint32_t); 259 static void emx_disable_aspm(struct emx_softc *); 260 static void emx_flush_tx_ring(struct emx_softc *); 261 static void emx_flush_rx_ring(struct emx_softc *); 262 static void emx_flush_txrx_ring(struct emx_softc *); 263 264 static void emx_print_debug_info(struct emx_softc *); 265 static void emx_print_nvm_info(struct emx_softc *); 266 static void emx_print_hw_stats(struct emx_softc *); 267 268 static int emx_sysctl_stats(SYSCTL_HANDLER_ARGS); 269 static int emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 270 static int emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS); 271 static int emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS); 272 static int emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS); 273 static void emx_add_sysctl(struct emx_softc *); 274 275 static void emx_serialize_skipmain(struct emx_softc *); 276 static void emx_deserialize_skipmain(struct emx_softc *); 277 278 /* Management and WOL Support */ 279 static void emx_get_mgmt(struct emx_softc *); 280 static void emx_rel_mgmt(struct emx_softc *); 281 static void emx_get_hw_control(struct emx_softc *); 282 static void emx_rel_hw_control(struct emx_softc *); 283 static void emx_enable_wol(device_t); 284 285 static device_method_t emx_methods[] = { 286 /* Device interface */ 287 DEVMETHOD(device_probe, emx_probe), 288 DEVMETHOD(device_attach, emx_attach), 289 DEVMETHOD(device_detach, emx_detach), 290 DEVMETHOD(device_shutdown, emx_shutdown), 291 DEVMETHOD(device_suspend, emx_suspend), 292 DEVMETHOD(device_resume, emx_resume), 293 DEVMETHOD_END 294 }; 295 296 static driver_t emx_driver = { 297 "emx", 298 emx_methods, 299 sizeof(struct emx_softc), 300 }; 301 302 static devclass_t emx_devclass; 303 304 DECLARE_DUMMY_MODULE(if_emx); 305 MODULE_DEPEND(emx, ig_hal, 1, 1, 1); 306 DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL); 307 308 /* 309 * Tunables 310 */ 311 static int emx_int_throttle_ceil = EMX_DEFAULT_ITR; 312 static int emx_rxd = EMX_DEFAULT_RXD; 313 static int emx_txd = EMX_DEFAULT_TXD; 314 static int emx_smart_pwr_down = 0; 315 static int emx_rxr = 0; 316 static int emx_txr = 1; 317 318 /* Controls whether promiscuous also shows bad packets */ 319 static int emx_debug_sbp = 0; 320 321 static int emx_82573_workaround = 1; 322 static int emx_msi_enable = 1; 323 324 static char emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE; 325 326 TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil); 327 TUNABLE_INT("hw.emx.rxd", &emx_rxd); 328 TUNABLE_INT("hw.emx.rxr", &emx_rxr); 329 TUNABLE_INT("hw.emx.txd", &emx_txd); 330 TUNABLE_INT("hw.emx.txr", &emx_txr); 331 TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down); 332 TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp); 333 TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround); 334 TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable); 335 TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl)); 336 337 /* Global used in WOL setup with multiport cards */ 338 static int emx_global_quad_port_a = 0; 339 340 /* Set this to one to display debug statistics */ 341 static int emx_display_debug_stats = 0; 342 343 #if !defined(KTR_IF_EMX) 344 #define KTR_IF_EMX KTR_ALL 345 #endif 346 KTR_INFO_MASTER(if_emx); 347 KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin"); 348 KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end"); 349 KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet"); 350 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet"); 351 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean"); 352 #define logif(name) KTR_LOG(if_emx_ ## name) 353 354 static __inline void 355 emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf) 356 { 357 rxd->rxd_bufaddr = htole64(rxbuf->paddr); 358 /* DD bit must be cleared */ 359 rxd->rxd_staterr = 0; 360 } 361 362 static __inline void 363 emx_free_txbuf(struct emx_txdata *tdata, struct emx_txbuf *tx_buffer) 364 { 365 366 KKASSERT(tx_buffer->m_head != NULL); 367 KKASSERT(tdata->tx_nmbuf > 0); 368 tdata->tx_nmbuf--; 369 370 bus_dmamap_unload(tdata->txtag, tx_buffer->map); 371 m_freem(tx_buffer->m_head); 372 tx_buffer->m_head = NULL; 373 } 374 375 static __inline void 376 emx_tx_intr(struct emx_txdata *tdata) 377 { 378 379 emx_txeof(tdata); 380 if (!ifsq_is_empty(tdata->ifsq)) 381 ifsq_devstart(tdata->ifsq); 382 } 383 384 static __inline void 385 emx_try_txgc(struct emx_txdata *tdata, int16_t dec) 386 { 387 388 if (tdata->tx_running > 0) { 389 tdata->tx_running -= dec; 390 if (tdata->tx_running <= 0 && tdata->tx_nmbuf && 391 tdata->num_tx_desc_avail < tdata->num_tx_desc && 392 tdata->num_tx_desc_avail + tdata->tx_intr_nsegs > 393 tdata->num_tx_desc) 394 emx_tx_collect(tdata, TRUE); 395 } 396 } 397 398 static void 399 emx_txgc_timer(void *xtdata) 400 { 401 struct emx_txdata *tdata = xtdata; 402 struct ifnet *ifp = &tdata->sc->arpcom.ac_if; 403 404 if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) != 405 (IFF_RUNNING | IFF_UP)) 406 return; 407 408 if (!lwkt_serialize_try(&tdata->tx_serialize)) 409 goto done; 410 411 if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) != 412 (IFF_RUNNING | IFF_UP)) { 413 lwkt_serialize_exit(&tdata->tx_serialize); 414 return; 415 } 416 emx_try_txgc(tdata, EMX_TX_RUNNING_DEC); 417 418 lwkt_serialize_exit(&tdata->tx_serialize); 419 done: 420 callout_reset(&tdata->tx_gc_timer, 1, emx_txgc_timer, tdata); 421 } 422 423 static __inline void 424 emx_rxcsum(uint32_t staterr, struct mbuf *mp) 425 { 426 /* Ignore Checksum bit is set */ 427 if (staterr & E1000_RXD_STAT_IXSM) 428 return; 429 430 if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == 431 E1000_RXD_STAT_IPCS) 432 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; 433 434 if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 435 E1000_RXD_STAT_TCPCS) { 436 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 437 CSUM_PSEUDO_HDR | 438 CSUM_FRAG_NOT_CHECKED; 439 mp->m_pkthdr.csum_data = htons(0xffff); 440 } 441 } 442 443 static __inline struct pktinfo * 444 emx_rssinfo(struct mbuf *m, struct pktinfo *pi, 445 uint32_t mrq, uint32_t hash, uint32_t staterr) 446 { 447 switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) { 448 case EMX_RXDMRQ_IPV4_TCP: 449 pi->pi_netisr = NETISR_IP; 450 pi->pi_flags = 0; 451 pi->pi_l3proto = IPPROTO_TCP; 452 break; 453 454 case EMX_RXDMRQ_IPV6_TCP: 455 pi->pi_netisr = NETISR_IPV6; 456 pi->pi_flags = 0; 457 pi->pi_l3proto = IPPROTO_TCP; 458 break; 459 460 case EMX_RXDMRQ_IPV4: 461 if (staterr & E1000_RXD_STAT_IXSM) 462 return NULL; 463 464 if ((staterr & 465 (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 466 E1000_RXD_STAT_TCPCS) { 467 pi->pi_netisr = NETISR_IP; 468 pi->pi_flags = 0; 469 pi->pi_l3proto = IPPROTO_UDP; 470 break; 471 } 472 /* FALL THROUGH */ 473 default: 474 return NULL; 475 } 476 477 m_sethash(m, toeplitz_hash(hash)); 478 return pi; 479 } 480 481 static int 482 emx_probe(device_t dev) 483 { 484 const struct emx_device *d; 485 uint16_t vid, did; 486 487 vid = pci_get_vendor(dev); 488 did = pci_get_device(dev); 489 490 for (d = emx_devices; d->desc != NULL; ++d) { 491 if (vid == d->vid && did == d->did) { 492 device_set_desc(dev, d->desc); 493 device_set_async_attach(dev, TRUE); 494 return 0; 495 } 496 } 497 return ENXIO; 498 } 499 500 static int 501 emx_attach(device_t dev) 502 { 503 struct emx_softc *sc = device_get_softc(dev); 504 int error = 0, i, throttle, msi_enable; 505 int tx_ring_max, ring_cnt; 506 u_int intr_flags; 507 uint16_t eeprom_data, device_id, apme_mask; 508 driver_intr_t *intr_func; 509 char flowctrl[IFM_ETH_FC_STRLEN]; 510 511 /* 512 * Setup RX rings 513 */ 514 for (i = 0; i < EMX_NRX_RING; ++i) { 515 sc->rx_data[i].sc = sc; 516 sc->rx_data[i].idx = i; 517 } 518 519 /* 520 * Setup TX ring 521 */ 522 for (i = 0; i < EMX_NTX_RING; ++i) { 523 sc->tx_data[i].sc = sc; 524 sc->tx_data[i].idx = i; 525 callout_init_mp(&sc->tx_data[i].tx_gc_timer); 526 } 527 528 /* 529 * Initialize serializers 530 */ 531 lwkt_serialize_init(&sc->main_serialize); 532 for (i = 0; i < EMX_NTX_RING; ++i) 533 lwkt_serialize_init(&sc->tx_data[i].tx_serialize); 534 for (i = 0; i < EMX_NRX_RING; ++i) 535 lwkt_serialize_init(&sc->rx_data[i].rx_serialize); 536 537 /* 538 * Initialize serializer array 539 */ 540 i = 0; 541 542 KKASSERT(i < EMX_NSERIALIZE); 543 sc->serializes[i++] = &sc->main_serialize; 544 545 KKASSERT(i < EMX_NSERIALIZE); 546 sc->serializes[i++] = &sc->tx_data[0].tx_serialize; 547 KKASSERT(i < EMX_NSERIALIZE); 548 sc->serializes[i++] = &sc->tx_data[1].tx_serialize; 549 550 KKASSERT(i < EMX_NSERIALIZE); 551 sc->serializes[i++] = &sc->rx_data[0].rx_serialize; 552 KKASSERT(i < EMX_NSERIALIZE); 553 sc->serializes[i++] = &sc->rx_data[1].rx_serialize; 554 555 KKASSERT(i == EMX_NSERIALIZE); 556 557 ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK, 558 emx_media_change, emx_media_status); 559 callout_init_mp(&sc->timer); 560 561 sc->dev = sc->osdep.dev = dev; 562 563 /* 564 * Determine hardware and mac type 565 */ 566 sc->hw.vendor_id = pci_get_vendor(dev); 567 sc->hw.device_id = pci_get_device(dev); 568 sc->hw.revision_id = pci_get_revid(dev); 569 sc->hw.subsystem_vendor_id = pci_get_subvendor(dev); 570 sc->hw.subsystem_device_id = pci_get_subdevice(dev); 571 572 if (e1000_set_mac_type(&sc->hw)) 573 return ENXIO; 574 575 /* Enable bus mastering */ 576 pci_enable_busmaster(dev); 577 578 /* 579 * Allocate IO memory 580 */ 581 sc->memory_rid = EMX_BAR_MEM; 582 sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 583 &sc->memory_rid, RF_ACTIVE); 584 if (sc->memory == NULL) { 585 device_printf(dev, "Unable to allocate bus resource: memory\n"); 586 error = ENXIO; 587 goto fail; 588 } 589 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory); 590 sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory); 591 592 /* XXX This is quite goofy, it is not actually used */ 593 sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle; 594 595 /* 596 * Don't enable MSI-X on 82574, see: 597 * 82574 specification update errata #15 598 * 599 * Don't enable MSI on 82571/82572, see: 600 * 82571/82572 specification update errata #63 601 */ 602 msi_enable = emx_msi_enable; 603 if (msi_enable && 604 (sc->hw.mac.type == e1000_82571 || 605 sc->hw.mac.type == e1000_82572)) 606 msi_enable = 0; 607 again: 608 /* 609 * Allocate interrupt 610 */ 611 sc->intr_type = pci_alloc_1intr(dev, msi_enable, 612 &sc->intr_rid, &intr_flags); 613 614 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 615 int unshared; 616 617 unshared = device_getenv_int(dev, "irq.unshared", 0); 618 if (!unshared) { 619 sc->flags |= EMX_FLAG_SHARED_INTR; 620 if (bootverbose) 621 device_printf(dev, "IRQ shared\n"); 622 } else { 623 intr_flags &= ~RF_SHAREABLE; 624 if (bootverbose) 625 device_printf(dev, "IRQ unshared\n"); 626 } 627 } 628 629 sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid, 630 intr_flags); 631 if (sc->intr_res == NULL) { 632 device_printf(dev, "Unable to allocate bus resource: %s\n", 633 sc->intr_type == PCI_INTR_TYPE_MSI ? "MSI" : "legacy intr"); 634 if (!msi_enable) { 635 /* Retry with MSI. */ 636 msi_enable = 1; 637 sc->flags &= ~EMX_FLAG_SHARED_INTR; 638 goto again; 639 } 640 error = ENXIO; 641 goto fail; 642 } 643 644 /* Save PCI command register for Shared Code */ 645 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 646 sc->hw.back = &sc->osdep; 647 648 /* 649 * For I217/I218, we need to map the flash memory and this 650 * must happen after the MAC is identified. 651 */ 652 if (sc->hw.mac.type == e1000_pch_lpt) { 653 sc->flash_rid = EMX_BAR_FLASH; 654 655 sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 656 &sc->flash_rid, RF_ACTIVE); 657 if (sc->flash == NULL) { 658 device_printf(dev, "Mapping of Flash failed\n"); 659 error = ENXIO; 660 goto fail; 661 } 662 sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash); 663 sc->osdep.flash_bus_space_handle = 664 rman_get_bushandle(sc->flash); 665 666 /* 667 * This is used in the shared code 668 * XXX this goof is actually not used. 669 */ 670 sc->hw.flash_address = (uint8_t *)sc->flash; 671 } else if (sc->hw.mac.type == e1000_pch_spt) { 672 /* 673 * In the new SPT device flash is not a seperate BAR, 674 * rather it is also in BAR0, so use the same tag and 675 * an offset handle for the FLASH read/write macros 676 * in the shared code. 677 */ 678 sc->osdep.flash_bus_space_tag = sc->osdep.mem_bus_space_tag; 679 sc->osdep.flash_bus_space_handle = 680 sc->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR; 681 } 682 683 /* Do Shared Code initialization */ 684 if (e1000_setup_init_funcs(&sc->hw, TRUE)) { 685 device_printf(dev, "Setup of Shared code failed\n"); 686 error = ENXIO; 687 goto fail; 688 } 689 e1000_get_bus_info(&sc->hw); 690 691 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 692 sc->hw.phy.autoneg_wait_to_complete = FALSE; 693 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT; 694 695 /* 696 * Interrupt throttle rate 697 */ 698 throttle = device_getenv_int(dev, "int_throttle_ceil", 699 emx_int_throttle_ceil); 700 if (throttle == 0) { 701 sc->int_throttle_ceil = 0; 702 } else { 703 if (throttle < 0) 704 throttle = EMX_DEFAULT_ITR; 705 706 /* Recalculate the tunable value to get the exact frequency. */ 707 throttle = 1000000000 / 256 / throttle; 708 709 /* Upper 16bits of ITR is reserved and should be zero */ 710 if (throttle & 0xffff0000) 711 throttle = 1000000000 / 256 / EMX_DEFAULT_ITR; 712 713 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 714 } 715 716 e1000_init_script_state_82541(&sc->hw, TRUE); 717 e1000_set_tbi_compatibility_82543(&sc->hw, TRUE); 718 719 /* Copper options */ 720 if (sc->hw.phy.media_type == e1000_media_type_copper) { 721 sc->hw.phy.mdix = EMX_AUTO_ALL_MODES; 722 sc->hw.phy.disable_polarity_correction = FALSE; 723 sc->hw.phy.ms_type = EMX_MASTER_SLAVE; 724 } 725 726 /* Set the frame limits assuming standard ethernet sized frames. */ 727 sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 728 729 /* This controls when hardware reports transmit completion status. */ 730 sc->hw.mac.report_tx_early = 1; 731 732 /* 733 * Calculate # of RX/TX rings 734 */ 735 ring_cnt = device_getenv_int(dev, "rxr", emx_rxr); 736 sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, EMX_NRX_RING); 737 738 tx_ring_max = 1; 739 if (sc->hw.mac.type == e1000_82571 || 740 sc->hw.mac.type == e1000_82572 || 741 sc->hw.mac.type == e1000_80003es2lan || 742 sc->hw.mac.type == e1000_pch_lpt || 743 sc->hw.mac.type == e1000_pch_spt || 744 sc->hw.mac.type == e1000_82574) 745 tx_ring_max = EMX_NTX_RING; 746 ring_cnt = device_getenv_int(dev, "txr", emx_txr); 747 sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, tx_ring_max); 748 749 if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap); 750 sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap); 751 sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap); 752 753 /* Allocate RX/TX rings' busdma(9) stuffs */ 754 error = emx_dma_alloc(sc); 755 if (error) 756 goto fail; 757 758 /* Allocate multicast array memory. */ 759 sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX, 760 M_DEVBUF, M_WAITOK); 761 762 /* Indicate SOL/IDER usage */ 763 if (e1000_check_reset_block(&sc->hw)) { 764 device_printf(dev, 765 "PHY reset is blocked due to SOL/IDER session.\n"); 766 } 767 768 /* Disable EEE on I217/I218 */ 769 sc->hw.dev_spec.ich8lan.eee_disable = 1; 770 771 /* 772 * Start from a known state, this is important in reading the 773 * nvm and mac from that. 774 */ 775 e1000_reset_hw(&sc->hw); 776 777 /* Make sure we have a good EEPROM before we read from it */ 778 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 779 /* 780 * Some PCI-E parts fail the first check due to 781 * the link being in sleep state, call it again, 782 * if it fails a second time its a real issue. 783 */ 784 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 785 device_printf(dev, 786 "The EEPROM Checksum Is Not Valid\n"); 787 error = EIO; 788 goto fail; 789 } 790 } 791 792 /* Copy the permanent MAC address out of the EEPROM */ 793 if (e1000_read_mac_addr(&sc->hw) < 0) { 794 device_printf(dev, "EEPROM read error while reading MAC" 795 " address\n"); 796 error = EIO; 797 goto fail; 798 } 799 if (!emx_is_valid_eaddr(sc->hw.mac.addr)) { 800 device_printf(dev, "Invalid MAC address\n"); 801 error = EIO; 802 goto fail; 803 } 804 805 /* Disable ULP support */ 806 e1000_disable_ulp_lpt_lp(&sc->hw, TRUE); 807 808 /* Determine if we have to control management hardware */ 809 if (e1000_enable_mng_pass_thru(&sc->hw)) 810 sc->flags |= EMX_FLAG_HAS_MGMT; 811 812 /* 813 * Setup Wake-on-Lan 814 */ 815 apme_mask = EMX_EEPROM_APME; 816 eeprom_data = 0; 817 switch (sc->hw.mac.type) { 818 case e1000_82573: 819 sc->flags |= EMX_FLAG_HAS_AMT; 820 /* FALL THROUGH */ 821 822 case e1000_82571: 823 case e1000_82572: 824 case e1000_80003es2lan: 825 if (sc->hw.bus.func == 1) { 826 e1000_read_nvm(&sc->hw, 827 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 828 } else { 829 e1000_read_nvm(&sc->hw, 830 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 831 } 832 break; 833 834 default: 835 e1000_read_nvm(&sc->hw, 836 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 837 break; 838 } 839 if (eeprom_data & apme_mask) 840 sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC; 841 842 /* 843 * We have the eeprom settings, now apply the special cases 844 * where the eeprom may be wrong or the board won't support 845 * wake on lan on a particular port 846 */ 847 device_id = pci_get_device(dev); 848 switch (device_id) { 849 case E1000_DEV_ID_82571EB_FIBER: 850 /* 851 * Wake events only supported on port A for dual fiber 852 * regardless of eeprom setting 853 */ 854 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & 855 E1000_STATUS_FUNC_1) 856 sc->wol = 0; 857 break; 858 859 case E1000_DEV_ID_82571EB_QUAD_COPPER: 860 case E1000_DEV_ID_82571EB_QUAD_FIBER: 861 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: 862 /* if quad port sc, disable WoL on all but port A */ 863 if (emx_global_quad_port_a != 0) 864 sc->wol = 0; 865 /* Reset for multiple quad port adapters */ 866 if (++emx_global_quad_port_a == 4) 867 emx_global_quad_port_a = 0; 868 break; 869 } 870 871 /* XXX disable wol */ 872 sc->wol = 0; 873 874 /* Initialized #of TX rings to use. */ 875 sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE); 876 877 /* Setup flow control. */ 878 device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl), 879 emx_flowctrl); 880 sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl); 881 882 /* Setup OS specific network interface */ 883 emx_setup_ifp(sc); 884 885 /* Add sysctl tree, must after em_setup_ifp() */ 886 emx_add_sysctl(sc); 887 888 /* Reset the hardware */ 889 error = emx_reset(sc); 890 if (error) { 891 /* 892 * Some 82573 parts fail the first reset, call it again, 893 * if it fails a second time its a real issue. 894 */ 895 error = emx_reset(sc); 896 if (error) { 897 device_printf(dev, "Unable to reset the hardware\n"); 898 ether_ifdetach(&sc->arpcom.ac_if); 899 goto fail; 900 } 901 } 902 903 /* Initialize statistics */ 904 emx_update_stats(sc); 905 906 sc->hw.mac.get_link_status = 1; 907 emx_update_link_status(sc); 908 909 /* Non-AMT based hardware can now take control from firmware */ 910 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) == 911 EMX_FLAG_HAS_MGMT) 912 emx_get_hw_control(sc); 913 914 /* 915 * Missing Interrupt Following ICR read: 916 * 917 * 82571/82572 specification update errata #76 918 * 82573 specification update errata #31 919 * 82574 specification update errata #12 920 */ 921 intr_func = emx_intr; 922 if ((sc->flags & EMX_FLAG_SHARED_INTR) && 923 (sc->hw.mac.type == e1000_82571 || 924 sc->hw.mac.type == e1000_82572 || 925 sc->hw.mac.type == e1000_82573 || 926 sc->hw.mac.type == e1000_82574)) 927 intr_func = emx_intr_mask; 928 929 error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc, 930 &sc->intr_tag, &sc->main_serialize); 931 if (error) { 932 device_printf(dev, "Failed to register interrupt handler"); 933 ether_ifdetach(&sc->arpcom.ac_if); 934 goto fail; 935 } 936 return (0); 937 fail: 938 emx_detach(dev); 939 return (error); 940 } 941 942 static int 943 emx_detach(device_t dev) 944 { 945 struct emx_softc *sc = device_get_softc(dev); 946 947 if (device_is_attached(dev)) { 948 struct ifnet *ifp = &sc->arpcom.ac_if; 949 950 ifnet_serialize_all(ifp); 951 952 emx_stop(sc); 953 954 e1000_phy_hw_reset(&sc->hw); 955 956 emx_rel_mgmt(sc); 957 emx_rel_hw_control(sc); 958 959 if (sc->wol) { 960 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 961 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 962 emx_enable_wol(dev); 963 } 964 965 bus_teardown_intr(dev, sc->intr_res, sc->intr_tag); 966 967 ifnet_deserialize_all(ifp); 968 969 ether_ifdetach(ifp); 970 } else if (sc->memory != NULL) { 971 emx_rel_hw_control(sc); 972 } 973 974 ifmedia_removeall(&sc->media); 975 bus_generic_detach(dev); 976 977 if (sc->intr_res != NULL) { 978 bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, 979 sc->intr_res); 980 } 981 982 if (sc->intr_type == PCI_INTR_TYPE_MSI) 983 pci_release_msi(dev); 984 985 if (sc->memory != NULL) { 986 bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid, 987 sc->memory); 988 } 989 990 if (sc->flash != NULL) { 991 bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid, 992 sc->flash); 993 } 994 995 emx_dma_free(sc); 996 997 if (sc->mta != NULL) 998 kfree(sc->mta, M_DEVBUF); 999 1000 if (sc->rx_rmap != NULL) 1001 if_ringmap_free(sc->rx_rmap); 1002 if (sc->tx_rmap != NULL) 1003 if_ringmap_free(sc->tx_rmap); 1004 1005 return (0); 1006 } 1007 1008 static int 1009 emx_shutdown(device_t dev) 1010 { 1011 return emx_suspend(dev); 1012 } 1013 1014 static int 1015 emx_suspend(device_t dev) 1016 { 1017 struct emx_softc *sc = device_get_softc(dev); 1018 struct ifnet *ifp = &sc->arpcom.ac_if; 1019 1020 ifnet_serialize_all(ifp); 1021 1022 emx_stop(sc); 1023 1024 emx_rel_mgmt(sc); 1025 emx_rel_hw_control(sc); 1026 1027 if (sc->wol) { 1028 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 1029 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 1030 emx_enable_wol(dev); 1031 } 1032 1033 ifnet_deserialize_all(ifp); 1034 1035 return bus_generic_suspend(dev); 1036 } 1037 1038 static int 1039 emx_resume(device_t dev) 1040 { 1041 struct emx_softc *sc = device_get_softc(dev); 1042 struct ifnet *ifp = &sc->arpcom.ac_if; 1043 int i; 1044 1045 ifnet_serialize_all(ifp); 1046 1047 emx_init(sc); 1048 emx_get_mgmt(sc); 1049 for (i = 0; i < sc->tx_ring_inuse; ++i) 1050 ifsq_devstart_sched(sc->tx_data[i].ifsq); 1051 1052 ifnet_deserialize_all(ifp); 1053 1054 return bus_generic_resume(dev); 1055 } 1056 1057 static void 1058 emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1059 { 1060 struct emx_softc *sc = ifp->if_softc; 1061 struct emx_txdata *tdata = ifsq_get_priv(ifsq); 1062 struct mbuf *m_head; 1063 int idx = -1, nsegs = 0; 1064 1065 KKASSERT(tdata->ifsq == ifsq); 1066 ASSERT_SERIALIZED(&tdata->tx_serialize); 1067 1068 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 1069 return; 1070 1071 if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) { 1072 ifsq_purge(ifsq); 1073 return; 1074 } 1075 1076 while (!ifsq_is_empty(ifsq)) { 1077 /* Now do we at least have a minimal? */ 1078 if (EMX_IS_OACTIVE(tdata)) { 1079 emx_tx_collect(tdata, FALSE); 1080 if (EMX_IS_OACTIVE(tdata)) { 1081 ifsq_set_oactive(ifsq); 1082 break; 1083 } 1084 } 1085 1086 logif(pkt_txqueue); 1087 m_head = ifsq_dequeue(ifsq); 1088 if (m_head == NULL) 1089 break; 1090 1091 if (emx_encap(tdata, &m_head, &nsegs, &idx)) { 1092 IFNET_STAT_INC(ifp, oerrors, 1); 1093 emx_tx_collect(tdata, FALSE); 1094 continue; 1095 } 1096 1097 /* 1098 * TX interrupt are aggressively aggregated, so increasing 1099 * opackets at TX interrupt time will make the opackets 1100 * statistics vastly inaccurate; we do the opackets increment 1101 * now. 1102 */ 1103 IFNET_STAT_INC(ifp, opackets, 1); 1104 1105 if (nsegs >= tdata->tx_wreg_nsegs) { 1106 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx); 1107 nsegs = 0; 1108 idx = -1; 1109 } 1110 1111 /* Send a copy of the frame to the BPF listener */ 1112 ETHER_BPF_MTAP(ifp, m_head); 1113 1114 /* Set timeout in case hardware has problems transmitting. */ 1115 tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT; 1116 } 1117 if (idx >= 0) 1118 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx); 1119 tdata->tx_running = EMX_TX_RUNNING; 1120 } 1121 1122 static int 1123 emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1124 { 1125 struct emx_softc *sc = ifp->if_softc; 1126 struct ifreq *ifr = (struct ifreq *)data; 1127 uint16_t eeprom_data = 0; 1128 int max_frame_size, mask, reinit; 1129 int error = 0; 1130 1131 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1132 1133 switch (command) { 1134 case SIOCSIFMTU: 1135 switch (sc->hw.mac.type) { 1136 case e1000_82573: 1137 /* 1138 * 82573 only supports jumbo frames 1139 * if ASPM is disabled. 1140 */ 1141 e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1, 1142 &eeprom_data); 1143 if (eeprom_data & NVM_WORD1A_ASPM_MASK) { 1144 max_frame_size = ETHER_MAX_LEN; 1145 break; 1146 } 1147 /* FALL THROUGH */ 1148 1149 /* Limit Jumbo Frame size */ 1150 case e1000_82571: 1151 case e1000_82572: 1152 case e1000_82574: 1153 case e1000_pch_lpt: 1154 case e1000_pch_spt: 1155 case e1000_80003es2lan: 1156 max_frame_size = 9234; 1157 break; 1158 1159 default: 1160 max_frame_size = MAX_JUMBO_FRAME_SIZE; 1161 break; 1162 } 1163 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 1164 ETHER_CRC_LEN) { 1165 error = EINVAL; 1166 break; 1167 } 1168 1169 ifp->if_mtu = ifr->ifr_mtu; 1170 sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + 1171 ETHER_CRC_LEN; 1172 1173 if (ifp->if_flags & IFF_RUNNING) 1174 emx_init(sc); 1175 break; 1176 1177 case SIOCSIFFLAGS: 1178 if (ifp->if_flags & IFF_UP) { 1179 if ((ifp->if_flags & IFF_RUNNING)) { 1180 if ((ifp->if_flags ^ sc->if_flags) & 1181 (IFF_PROMISC | IFF_ALLMULTI)) { 1182 emx_disable_promisc(sc); 1183 emx_set_promisc(sc); 1184 } 1185 } else { 1186 emx_init(sc); 1187 } 1188 } else if (ifp->if_flags & IFF_RUNNING) { 1189 emx_stop(sc); 1190 } 1191 sc->if_flags = ifp->if_flags; 1192 break; 1193 1194 case SIOCADDMULTI: 1195 case SIOCDELMULTI: 1196 if (ifp->if_flags & IFF_RUNNING) { 1197 emx_disable_intr(sc); 1198 emx_set_multi(sc); 1199 #ifdef IFPOLL_ENABLE 1200 if (!(ifp->if_flags & IFF_NPOLLING)) 1201 #endif 1202 emx_enable_intr(sc); 1203 } 1204 break; 1205 1206 case SIOCSIFMEDIA: 1207 /* Check SOL/IDER usage */ 1208 if (e1000_check_reset_block(&sc->hw)) { 1209 device_printf(sc->dev, "Media change is" 1210 " blocked due to SOL/IDER session.\n"); 1211 break; 1212 } 1213 /* FALL THROUGH */ 1214 1215 case SIOCGIFMEDIA: 1216 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 1217 break; 1218 1219 case SIOCSIFCAP: 1220 reinit = 0; 1221 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1222 if (mask & IFCAP_RXCSUM) { 1223 ifp->if_capenable ^= IFCAP_RXCSUM; 1224 reinit = 1; 1225 } 1226 if (mask & IFCAP_VLAN_HWTAGGING) { 1227 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1228 reinit = 1; 1229 } 1230 if (mask & IFCAP_TXCSUM) { 1231 ifp->if_capenable ^= IFCAP_TXCSUM; 1232 if (ifp->if_capenable & IFCAP_TXCSUM) 1233 ifp->if_hwassist |= EMX_CSUM_FEATURES; 1234 else 1235 ifp->if_hwassist &= ~EMX_CSUM_FEATURES; 1236 } 1237 if (mask & IFCAP_TSO) { 1238 ifp->if_capenable ^= IFCAP_TSO; 1239 if (ifp->if_capenable & IFCAP_TSO) 1240 ifp->if_hwassist |= CSUM_TSO; 1241 else 1242 ifp->if_hwassist &= ~CSUM_TSO; 1243 } 1244 if (mask & IFCAP_RSS) 1245 ifp->if_capenable ^= IFCAP_RSS; 1246 if (reinit && (ifp->if_flags & IFF_RUNNING)) 1247 emx_init(sc); 1248 break; 1249 1250 default: 1251 error = ether_ioctl(ifp, command, data); 1252 break; 1253 } 1254 return (error); 1255 } 1256 1257 static void 1258 emx_watchdog(struct ifaltq_subque *ifsq) 1259 { 1260 struct emx_txdata *tdata = ifsq_get_priv(ifsq); 1261 struct ifnet *ifp = ifsq_get_ifp(ifsq); 1262 struct emx_softc *sc = ifp->if_softc; 1263 int i; 1264 1265 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1266 1267 /* 1268 * The timer is set to 5 every time start queues a packet. 1269 * Then txeof keeps resetting it as long as it cleans at 1270 * least one descriptor. 1271 * Finally, anytime all descriptors are clean the timer is 1272 * set to 0. 1273 */ 1274 1275 if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) == 1276 E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) { 1277 /* 1278 * If we reach here, all TX jobs are completed and 1279 * the TX engine should have been idled for some time. 1280 * We don't need to call ifsq_devstart_sched() here. 1281 */ 1282 ifsq_clr_oactive(ifsq); 1283 tdata->tx_watchdog.wd_timer = 0; 1284 return; 1285 } 1286 1287 /* 1288 * If we are in this routine because of pause frames, then 1289 * don't reset the hardware. 1290 */ 1291 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) { 1292 tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT; 1293 return; 1294 } 1295 1296 if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx); 1297 1298 IFNET_STAT_INC(ifp, oerrors, 1); 1299 1300 emx_init(sc); 1301 for (i = 0; i < sc->tx_ring_inuse; ++i) 1302 ifsq_devstart_sched(sc->tx_data[i].ifsq); 1303 } 1304 1305 static void 1306 emx_init(void *xsc) 1307 { 1308 struct emx_softc *sc = xsc; 1309 struct ifnet *ifp = &sc->arpcom.ac_if; 1310 device_t dev = sc->dev; 1311 boolean_t polling; 1312 int i; 1313 1314 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1315 1316 emx_stop(sc); 1317 1318 /* Get the latest mac address, User can use a LAA */ 1319 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN); 1320 1321 /* Put the address into the Receive Address Array */ 1322 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1323 1324 /* 1325 * With the 82571 sc, RAR[0] may be overwritten 1326 * when the other port is reset, we make a duplicate 1327 * in RAR[14] for that eventuality, this assures 1328 * the interface continues to function. 1329 */ 1330 if (sc->hw.mac.type == e1000_82571) { 1331 e1000_set_laa_state_82571(&sc->hw, TRUE); 1332 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 1333 E1000_RAR_ENTRIES - 1); 1334 } 1335 1336 /* Initialize the hardware */ 1337 if (emx_reset(sc)) { 1338 device_printf(dev, "Unable to reset the hardware\n"); 1339 /* XXX emx_stop()? */ 1340 return; 1341 } 1342 emx_update_link_status(sc); 1343 1344 /* Setup VLAN support, basic and offload if available */ 1345 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1346 1347 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1348 uint32_t ctrl; 1349 1350 ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL); 1351 ctrl |= E1000_CTRL_VME; 1352 E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl); 1353 } 1354 1355 /* Configure for OS presence */ 1356 emx_get_mgmt(sc); 1357 1358 polling = FALSE; 1359 #ifdef IFPOLL_ENABLE 1360 if (ifp->if_flags & IFF_NPOLLING) 1361 polling = TRUE; 1362 #endif 1363 sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling); 1364 ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse); 1365 1366 /* Prepare transmit descriptors and buffers */ 1367 for (i = 0; i < sc->tx_ring_inuse; ++i) 1368 emx_init_tx_ring(&sc->tx_data[i]); 1369 emx_init_tx_unit(sc); 1370 1371 /* Setup Multicast table */ 1372 emx_set_multi(sc); 1373 1374 /* Prepare receive descriptors and buffers */ 1375 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1376 if (emx_init_rx_ring(&sc->rx_data[i])) { 1377 device_printf(dev, 1378 "Could not setup receive structures\n"); 1379 emx_stop(sc); 1380 return; 1381 } 1382 } 1383 emx_init_rx_unit(sc); 1384 1385 /* Don't lose promiscuous settings */ 1386 emx_set_promisc(sc); 1387 1388 /* Reset hardware counters */ 1389 e1000_clear_hw_cntrs_base_generic(&sc->hw); 1390 1391 /* MSI/X configuration for 82574 */ 1392 if (sc->hw.mac.type == e1000_82574) { 1393 int tmp; 1394 1395 tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 1396 tmp |= E1000_CTRL_EXT_PBA_CLR; 1397 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp); 1398 /* 1399 * XXX MSIX 1400 * Set the IVAR - interrupt vector routing. 1401 * Each nibble represents a vector, high bit 1402 * is enable, other 3 bits are the MSIX table 1403 * entry, we map RXQ0 to 0, TXQ0 to 1, and 1404 * Link (other) to 2, hence the magic number. 1405 */ 1406 E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908); 1407 } 1408 1409 /* 1410 * Only enable interrupts if we are not polling, make sure 1411 * they are off otherwise. 1412 */ 1413 if (polling) 1414 emx_disable_intr(sc); 1415 else 1416 emx_enable_intr(sc); 1417 1418 /* AMT based hardware can now take control from firmware */ 1419 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) == 1420 (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) 1421 emx_get_hw_control(sc); 1422 1423 ifp->if_flags |= IFF_RUNNING; 1424 for (i = 0; i < sc->tx_ring_inuse; ++i) { 1425 struct emx_txdata *tdata = &sc->tx_data[i]; 1426 1427 ifsq_clr_oactive(tdata->ifsq); 1428 ifsq_watchdog_start(&tdata->tx_watchdog); 1429 if (!polling) { 1430 callout_reset_bycpu(&tdata->tx_gc_timer, 1, 1431 emx_txgc_timer, tdata, ifsq_get_cpuid(tdata->ifsq)); 1432 } 1433 } 1434 callout_reset(&sc->timer, hz, emx_timer, sc); 1435 } 1436 1437 static void 1438 emx_intr(void *xsc) 1439 { 1440 emx_intr_body(xsc, TRUE); 1441 } 1442 1443 static void 1444 emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted) 1445 { 1446 struct ifnet *ifp = &sc->arpcom.ac_if; 1447 uint32_t reg_icr; 1448 1449 logif(intr_beg); 1450 ASSERT_SERIALIZED(&sc->main_serialize); 1451 1452 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 1453 1454 if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) { 1455 logif(intr_end); 1456 return; 1457 } 1458 1459 /* 1460 * XXX: some laptops trigger several spurious interrupts 1461 * on emx(4) when in the resume cycle. The ICR register 1462 * reports all-ones value in this case. Processing such 1463 * interrupts would lead to a freeze. I don't know why. 1464 */ 1465 if (reg_icr == 0xffffffff) { 1466 logif(intr_end); 1467 return; 1468 } 1469 1470 if (ifp->if_flags & IFF_RUNNING) { 1471 if (reg_icr & 1472 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 1473 int i; 1474 1475 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1476 lwkt_serialize_enter( 1477 &sc->rx_data[i].rx_serialize); 1478 emx_rxeof(&sc->rx_data[i], -1); 1479 lwkt_serialize_exit( 1480 &sc->rx_data[i].rx_serialize); 1481 } 1482 } 1483 if (reg_icr & E1000_ICR_TXDW) { 1484 struct emx_txdata *tdata = &sc->tx_data[0]; 1485 1486 lwkt_serialize_enter(&tdata->tx_serialize); 1487 emx_tx_intr(tdata); 1488 lwkt_serialize_exit(&tdata->tx_serialize); 1489 } 1490 } 1491 1492 /* Link status change */ 1493 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1494 emx_serialize_skipmain(sc); 1495 1496 callout_stop(&sc->timer); 1497 sc->hw.mac.get_link_status = 1; 1498 emx_update_link_status(sc); 1499 1500 /* Deal with TX cruft when link lost */ 1501 emx_tx_purge(sc); 1502 1503 callout_reset(&sc->timer, hz, emx_timer, sc); 1504 1505 emx_deserialize_skipmain(sc); 1506 } 1507 1508 if (reg_icr & E1000_ICR_RXO) 1509 sc->rx_overruns++; 1510 1511 logif(intr_end); 1512 } 1513 1514 static void 1515 emx_intr_mask(void *xsc) 1516 { 1517 struct emx_softc *sc = xsc; 1518 1519 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 1520 /* 1521 * NOTE: 1522 * ICR.INT_ASSERTED bit will never be set if IMS is 0, 1523 * so don't check it. 1524 */ 1525 emx_intr_body(sc, FALSE); 1526 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 1527 } 1528 1529 static void 1530 emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1531 { 1532 struct emx_softc *sc = ifp->if_softc; 1533 1534 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1535 1536 emx_update_link_status(sc); 1537 1538 ifmr->ifm_status = IFM_AVALID; 1539 ifmr->ifm_active = IFM_ETHER; 1540 1541 if (!sc->link_active) { 1542 if (sc->hw.mac.autoneg) 1543 ifmr->ifm_active |= IFM_NONE; 1544 else 1545 ifmr->ifm_active |= sc->media.ifm_media; 1546 return; 1547 } 1548 1549 ifmr->ifm_status |= IFM_ACTIVE; 1550 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1551 ifmr->ifm_active |= sc->ifm_flowctrl; 1552 1553 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1554 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 1555 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 1556 } else { 1557 switch (sc->link_speed) { 1558 case 10: 1559 ifmr->ifm_active |= IFM_10_T; 1560 break; 1561 case 100: 1562 ifmr->ifm_active |= IFM_100_TX; 1563 break; 1564 1565 case 1000: 1566 ifmr->ifm_active |= IFM_1000_T; 1567 break; 1568 } 1569 if (sc->link_duplex == FULL_DUPLEX) 1570 ifmr->ifm_active |= IFM_FDX; 1571 else 1572 ifmr->ifm_active |= IFM_HDX; 1573 } 1574 if (ifmr->ifm_active & IFM_FDX) 1575 ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode); 1576 } 1577 1578 static int 1579 emx_media_change(struct ifnet *ifp) 1580 { 1581 struct emx_softc *sc = ifp->if_softc; 1582 struct ifmedia *ifm = &sc->media; 1583 1584 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1585 1586 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1587 return (EINVAL); 1588 1589 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1590 case IFM_AUTO: 1591 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1592 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT; 1593 break; 1594 1595 case IFM_1000_SX: 1596 case IFM_1000_T: 1597 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1598 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1599 break; 1600 1601 case IFM_100_TX: 1602 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1603 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1604 } else { 1605 if (IFM_OPTIONS(ifm->ifm_media) & 1606 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1607 if (bootverbose) { 1608 if_printf(ifp, "Flow control is not " 1609 "allowed for half-duplex\n"); 1610 } 1611 return EINVAL; 1612 } 1613 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1614 } 1615 sc->hw.mac.autoneg = FALSE; 1616 sc->hw.phy.autoneg_advertised = 0; 1617 break; 1618 1619 case IFM_10_T: 1620 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1621 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1622 } else { 1623 if (IFM_OPTIONS(ifm->ifm_media) & 1624 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1625 if (bootverbose) { 1626 if_printf(ifp, "Flow control is not " 1627 "allowed for half-duplex\n"); 1628 } 1629 return EINVAL; 1630 } 1631 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1632 } 1633 sc->hw.mac.autoneg = FALSE; 1634 sc->hw.phy.autoneg_advertised = 0; 1635 break; 1636 1637 default: 1638 if (bootverbose) { 1639 if_printf(ifp, "Unsupported media type %d\n", 1640 IFM_SUBTYPE(ifm->ifm_media)); 1641 } 1642 return EINVAL; 1643 } 1644 sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK; 1645 1646 if (ifp->if_flags & IFF_RUNNING) 1647 emx_init(sc); 1648 1649 return (0); 1650 } 1651 1652 static int 1653 emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp, 1654 int *segs_used, int *idx) 1655 { 1656 bus_dma_segment_t segs[EMX_MAX_SCATTER]; 1657 bus_dmamap_t map; 1658 struct emx_txbuf *tx_buffer, *tx_buffer_mapped; 1659 struct e1000_tx_desc *ctxd = NULL; 1660 struct mbuf *m_head = *m_headp; 1661 uint32_t txd_upper, txd_lower, cmd = 0; 1662 int maxsegs, nsegs, i, j, first, last = 0, error; 1663 1664 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1665 error = emx_tso_pullup(tdata, m_headp); 1666 if (error) 1667 return error; 1668 m_head = *m_headp; 1669 } 1670 1671 txd_upper = txd_lower = 0; 1672 1673 /* 1674 * Capture the first descriptor index, this descriptor 1675 * will have the index of the EOP which is the only one 1676 * that now gets a DONE bit writeback. 1677 */ 1678 first = tdata->next_avail_tx_desc; 1679 tx_buffer = &tdata->tx_buf[first]; 1680 tx_buffer_mapped = tx_buffer; 1681 map = tx_buffer->map; 1682 1683 maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED; 1684 KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc")); 1685 if (maxsegs > EMX_MAX_SCATTER) 1686 maxsegs = EMX_MAX_SCATTER; 1687 1688 error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp, 1689 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1690 if (error) { 1691 m_freem(*m_headp); 1692 *m_headp = NULL; 1693 return error; 1694 } 1695 bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE); 1696 1697 m_head = *m_headp; 1698 tdata->tx_nsegs += nsegs; 1699 *segs_used += nsegs; 1700 1701 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1702 /* TSO will consume one TX desc */ 1703 i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower); 1704 tdata->tx_nsegs += i; 1705 *segs_used += i; 1706 } else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) { 1707 /* TX csum offloading will consume one TX desc */ 1708 i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower); 1709 tdata->tx_nsegs += i; 1710 *segs_used += i; 1711 } 1712 1713 /* Handle VLAN tag */ 1714 if (m_head->m_flags & M_VLANTAG) { 1715 /* Set the vlan id. */ 1716 txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16); 1717 /* Tell hardware to add tag */ 1718 txd_lower |= htole32(E1000_TXD_CMD_VLE); 1719 } 1720 1721 i = tdata->next_avail_tx_desc; 1722 1723 /* Set up our transmit descriptors */ 1724 for (j = 0; j < nsegs; j++) { 1725 tx_buffer = &tdata->tx_buf[i]; 1726 ctxd = &tdata->tx_desc_base[i]; 1727 1728 ctxd->buffer_addr = htole64(segs[j].ds_addr); 1729 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS | 1730 txd_lower | segs[j].ds_len); 1731 ctxd->upper.data = htole32(txd_upper); 1732 1733 last = i; 1734 if (++i == tdata->num_tx_desc) 1735 i = 0; 1736 } 1737 1738 tdata->next_avail_tx_desc = i; 1739 1740 KKASSERT(tdata->num_tx_desc_avail > nsegs); 1741 tdata->num_tx_desc_avail -= nsegs; 1742 tdata->tx_nmbuf++; 1743 1744 tx_buffer->m_head = m_head; 1745 tx_buffer_mapped->map = tx_buffer->map; 1746 tx_buffer->map = map; 1747 1748 if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) { 1749 tdata->tx_nsegs = 0; 1750 1751 /* 1752 * Report Status (RS) is turned on 1753 * every tx_intr_nsegs descriptors. 1754 */ 1755 cmd = E1000_TXD_CMD_RS; 1756 1757 /* 1758 * Keep track of the descriptor, which will 1759 * be written back by hardware. 1760 */ 1761 tdata->tx_dd[tdata->tx_dd_tail] = last; 1762 EMX_INC_TXDD_IDX(tdata->tx_dd_tail); 1763 KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head); 1764 } 1765 1766 /* 1767 * Last Descriptor of Packet needs End Of Packet (EOP) 1768 */ 1769 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd); 1770 1771 /* 1772 * Defer TDT updating, until enough descriptors are setup 1773 */ 1774 *idx = i; 1775 1776 #ifdef EMX_TSS_DEBUG 1777 tdata->tx_pkts++; 1778 #endif 1779 1780 return (0); 1781 } 1782 1783 static void 1784 emx_set_promisc(struct emx_softc *sc) 1785 { 1786 struct ifnet *ifp = &sc->arpcom.ac_if; 1787 uint32_t reg_rctl; 1788 1789 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1790 1791 if (ifp->if_flags & IFF_PROMISC) { 1792 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1793 /* Turn this on if you want to see bad packets */ 1794 if (emx_debug_sbp) 1795 reg_rctl |= E1000_RCTL_SBP; 1796 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1797 } else if (ifp->if_flags & IFF_ALLMULTI) { 1798 reg_rctl |= E1000_RCTL_MPE; 1799 reg_rctl &= ~E1000_RCTL_UPE; 1800 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1801 } 1802 } 1803 1804 static void 1805 emx_disable_promisc(struct emx_softc *sc) 1806 { 1807 struct ifnet *ifp = &sc->arpcom.ac_if; 1808 uint32_t reg_rctl; 1809 int mcnt = 0; 1810 1811 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1812 reg_rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP); 1813 1814 if (ifp->if_flags & IFF_ALLMULTI) { 1815 mcnt = EMX_MCAST_ADDR_MAX; 1816 } else { 1817 const struct ifmultiaddr *ifma; 1818 1819 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1820 if (ifma->ifma_addr->sa_family != AF_LINK) 1821 continue; 1822 if (mcnt == EMX_MCAST_ADDR_MAX) 1823 break; 1824 mcnt++; 1825 } 1826 } 1827 /* Don't disable if in MAX groups */ 1828 if (mcnt < EMX_MCAST_ADDR_MAX) 1829 reg_rctl &= ~E1000_RCTL_MPE; 1830 1831 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1832 } 1833 1834 static void 1835 emx_set_multi(struct emx_softc *sc) 1836 { 1837 struct ifnet *ifp = &sc->arpcom.ac_if; 1838 struct ifmultiaddr *ifma; 1839 uint32_t reg_rctl = 0; 1840 uint8_t *mta; 1841 int mcnt = 0; 1842 1843 mta = sc->mta; 1844 bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX); 1845 1846 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1847 if (ifma->ifma_addr->sa_family != AF_LINK) 1848 continue; 1849 1850 if (mcnt == EMX_MCAST_ADDR_MAX) 1851 break; 1852 1853 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1854 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1855 mcnt++; 1856 } 1857 1858 if (mcnt >= EMX_MCAST_ADDR_MAX) { 1859 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1860 reg_rctl |= E1000_RCTL_MPE; 1861 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1862 } else { 1863 e1000_update_mc_addr_list(&sc->hw, mta, mcnt); 1864 } 1865 } 1866 1867 /* 1868 * This routine checks for link status and updates statistics. 1869 */ 1870 static void 1871 emx_timer(void *xsc) 1872 { 1873 struct emx_softc *sc = xsc; 1874 struct ifnet *ifp = &sc->arpcom.ac_if; 1875 1876 lwkt_serialize_enter(&sc->main_serialize); 1877 1878 emx_update_link_status(sc); 1879 emx_update_stats(sc); 1880 1881 /* Reset LAA into RAR[0] on 82571 */ 1882 if (e1000_get_laa_state_82571(&sc->hw) == TRUE) 1883 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1884 1885 if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 1886 emx_print_hw_stats(sc); 1887 1888 emx_smartspeed(sc); 1889 1890 callout_reset(&sc->timer, hz, emx_timer, sc); 1891 1892 lwkt_serialize_exit(&sc->main_serialize); 1893 } 1894 1895 static void 1896 emx_update_link_status(struct emx_softc *sc) 1897 { 1898 struct e1000_hw *hw = &sc->hw; 1899 struct ifnet *ifp = &sc->arpcom.ac_if; 1900 device_t dev = sc->dev; 1901 uint32_t link_check = 0; 1902 1903 /* Get the cached link value or read phy for real */ 1904 switch (hw->phy.media_type) { 1905 case e1000_media_type_copper: 1906 if (hw->mac.get_link_status) { 1907 if (hw->mac.type == e1000_pch_spt) 1908 msec_delay(50); 1909 /* Do the work to read phy */ 1910 e1000_check_for_link(hw); 1911 link_check = !hw->mac.get_link_status; 1912 if (link_check) /* ESB2 fix */ 1913 e1000_cfg_on_link_up(hw); 1914 } else { 1915 link_check = TRUE; 1916 } 1917 break; 1918 1919 case e1000_media_type_fiber: 1920 e1000_check_for_link(hw); 1921 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 1922 break; 1923 1924 case e1000_media_type_internal_serdes: 1925 e1000_check_for_link(hw); 1926 link_check = sc->hw.mac.serdes_has_link; 1927 break; 1928 1929 case e1000_media_type_unknown: 1930 default: 1931 break; 1932 } 1933 1934 /* Now check for a transition */ 1935 if (link_check && sc->link_active == 0) { 1936 e1000_get_speed_and_duplex(hw, &sc->link_speed, 1937 &sc->link_duplex); 1938 1939 /* 1940 * Check if we should enable/disable SPEED_MODE bit on 1941 * 82571EB/82572EI 1942 */ 1943 if (sc->link_speed != SPEED_1000 && 1944 (hw->mac.type == e1000_82571 || 1945 hw->mac.type == e1000_82572)) { 1946 int tarc0; 1947 1948 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 1949 tarc0 &= ~EMX_TARC_SPEED_MODE; 1950 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 1951 } 1952 if (bootverbose) { 1953 char flowctrl[IFM_ETH_FC_STRLEN]; 1954 1955 e1000_fc2str(hw->fc.current_mode, flowctrl, 1956 sizeof(flowctrl)); 1957 device_printf(dev, "Link is up %d Mbps %s, " 1958 "Flow control: %s\n", 1959 sc->link_speed, 1960 (sc->link_duplex == FULL_DUPLEX) ? 1961 "Full Duplex" : "Half Duplex", 1962 flowctrl); 1963 } 1964 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1965 e1000_force_flowctrl(hw, sc->ifm_flowctrl); 1966 sc->link_active = 1; 1967 sc->smartspeed = 0; 1968 ifp->if_baudrate = sc->link_speed * 1000000; 1969 ifp->if_link_state = LINK_STATE_UP; 1970 if_link_state_change(ifp); 1971 } else if (!link_check && sc->link_active == 1) { 1972 ifp->if_baudrate = sc->link_speed = 0; 1973 sc->link_duplex = 0; 1974 if (bootverbose) 1975 device_printf(dev, "Link is Down\n"); 1976 sc->link_active = 0; 1977 ifp->if_link_state = LINK_STATE_DOWN; 1978 if_link_state_change(ifp); 1979 } 1980 } 1981 1982 static void 1983 emx_stop(struct emx_softc *sc) 1984 { 1985 struct ifnet *ifp = &sc->arpcom.ac_if; 1986 int i; 1987 1988 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1989 1990 emx_disable_intr(sc); 1991 1992 callout_stop(&sc->timer); 1993 1994 ifp->if_flags &= ~IFF_RUNNING; 1995 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1996 struct emx_txdata *tdata = &sc->tx_data[i]; 1997 1998 ifsq_clr_oactive(tdata->ifsq); 1999 ifsq_watchdog_stop(&tdata->tx_watchdog); 2000 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED; 2001 2002 tdata->tx_running = 0; 2003 callout_stop(&tdata->tx_gc_timer); 2004 } 2005 2006 /* I219 needs some special flushing to avoid hangs */ 2007 if (sc->hw.mac.type == e1000_pch_spt) 2008 emx_flush_txrx_ring(sc); 2009 2010 /* 2011 * Disable multiple receive queues. 2012 * 2013 * NOTE: 2014 * We should disable multiple receive queues before 2015 * resetting the hardware. 2016 */ 2017 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0); 2018 2019 e1000_reset_hw(&sc->hw); 2020 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 2021 2022 for (i = 0; i < sc->tx_ring_cnt; ++i) 2023 emx_free_tx_ring(&sc->tx_data[i]); 2024 for (i = 0; i < sc->rx_ring_cnt; ++i) 2025 emx_free_rx_ring(&sc->rx_data[i]); 2026 } 2027 2028 static int 2029 emx_reset(struct emx_softc *sc) 2030 { 2031 device_t dev = sc->dev; 2032 uint16_t rx_buffer_size; 2033 uint32_t pba; 2034 2035 /* Set up smart power down as default off on newer adapters. */ 2036 if (!emx_smart_pwr_down && 2037 (sc->hw.mac.type == e1000_82571 || 2038 sc->hw.mac.type == e1000_82572)) { 2039 uint16_t phy_tmp = 0; 2040 2041 /* Speed up time to link by disabling smart power down. */ 2042 e1000_read_phy_reg(&sc->hw, 2043 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 2044 phy_tmp &= ~IGP02E1000_PM_SPD; 2045 e1000_write_phy_reg(&sc->hw, 2046 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 2047 } 2048 2049 /* 2050 * Packet Buffer Allocation (PBA) 2051 * Writing PBA sets the receive portion of the buffer 2052 * the remainder is used for the transmit buffer. 2053 */ 2054 switch (sc->hw.mac.type) { 2055 /* Total Packet Buffer on these is 48K */ 2056 case e1000_82571: 2057 case e1000_82572: 2058 case e1000_80003es2lan: 2059 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 2060 break; 2061 2062 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 2063 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 2064 break; 2065 2066 case e1000_82574: 2067 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 2068 break; 2069 2070 case e1000_pch_lpt: 2071 case e1000_pch_spt: 2072 pba = E1000_PBA_26K; 2073 break; 2074 2075 default: 2076 /* Devices before 82547 had a Packet Buffer of 64K. */ 2077 if (sc->hw.mac.max_frame_size > 8192) 2078 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 2079 else 2080 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 2081 } 2082 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba); 2083 2084 /* 2085 * These parameters control the automatic generation (Tx) and 2086 * response (Rx) to Ethernet PAUSE frames. 2087 * - High water mark should allow for at least two frames to be 2088 * received after sending an XOFF. 2089 * - Low water mark works best when it is very near the high water mark. 2090 * This allows the receiver to restart by sending XON when it has 2091 * drained a bit. Here we use an arbitary value of 1500 which will 2092 * restart after one full frame is pulled from the buffer. There 2093 * could be several smaller frames in the buffer and if so they will 2094 * not trigger the XON until their total number reduces the buffer 2095 * by 1500. 2096 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 2097 */ 2098 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10; 2099 2100 sc->hw.fc.high_water = rx_buffer_size - 2101 roundup2(sc->hw.mac.max_frame_size, 1024); 2102 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500; 2103 2104 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME; 2105 sc->hw.fc.send_xon = TRUE; 2106 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl); 2107 2108 /* 2109 * Device specific overrides/settings 2110 */ 2111 if (sc->hw.mac.type == e1000_pch_lpt || 2112 sc->hw.mac.type == e1000_pch_spt) { 2113 sc->hw.fc.high_water = 0x5C20; 2114 sc->hw.fc.low_water = 0x5048; 2115 sc->hw.fc.pause_time = 0x0650; 2116 sc->hw.fc.refresh_time = 0x0400; 2117 /* Jumbos need adjusted PBA */ 2118 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) 2119 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12); 2120 else 2121 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26); 2122 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2123 sc->hw.fc.pause_time = 0xFFFF; 2124 } 2125 2126 /* I219 needs some special flushing to avoid hangs */ 2127 if (sc->hw.mac.type == e1000_pch_spt) 2128 emx_flush_txrx_ring(sc); 2129 2130 /* Issue a global reset */ 2131 e1000_reset_hw(&sc->hw); 2132 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 2133 emx_disable_aspm(sc); 2134 2135 if (e1000_init_hw(&sc->hw) < 0) { 2136 device_printf(dev, "Hardware Initialization Failed\n"); 2137 return (EIO); 2138 } 2139 2140 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 2141 e1000_get_phy_info(&sc->hw); 2142 e1000_check_for_link(&sc->hw); 2143 2144 return (0); 2145 } 2146 2147 static void 2148 emx_setup_ifp(struct emx_softc *sc) 2149 { 2150 struct ifnet *ifp = &sc->arpcom.ac_if; 2151 int i; 2152 2153 if_initname(ifp, device_get_name(sc->dev), 2154 device_get_unit(sc->dev)); 2155 ifp->if_softc = sc; 2156 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2157 ifp->if_init = emx_init; 2158 ifp->if_ioctl = emx_ioctl; 2159 ifp->if_start = emx_start; 2160 #ifdef IFPOLL_ENABLE 2161 ifp->if_npoll = emx_npoll; 2162 #endif 2163 ifp->if_serialize = emx_serialize; 2164 ifp->if_deserialize = emx_deserialize; 2165 ifp->if_tryserialize = emx_tryserialize; 2166 #ifdef INVARIANTS 2167 ifp->if_serialize_assert = emx_serialize_assert; 2168 #endif 2169 2170 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc; 2171 2172 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1); 2173 ifq_set_ready(&ifp->if_snd); 2174 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 2175 2176 ifp->if_mapsubq = ifq_mapsubq_modulo; 2177 ifq_set_subq_divisor(&ifp->if_snd, 1); 2178 2179 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2180 2181 ifp->if_capabilities = IFCAP_HWCSUM | 2182 IFCAP_VLAN_HWTAGGING | 2183 IFCAP_VLAN_MTU | 2184 IFCAP_TSO; 2185 if (sc->rx_ring_cnt > 1) 2186 ifp->if_capabilities |= IFCAP_RSS; 2187 ifp->if_capenable = ifp->if_capabilities; 2188 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2189 2190 /* 2191 * Tell the upper layer(s) we support long frames. 2192 */ 2193 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2194 2195 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2196 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2197 struct emx_txdata *tdata = &sc->tx_data[i]; 2198 2199 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2200 ifsq_set_priv(ifsq, tdata); 2201 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2202 tdata->ifsq = ifsq; 2203 2204 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2205 } 2206 2207 /* 2208 * Specify the media types supported by this sc and register 2209 * callbacks to update media and link information 2210 */ 2211 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2212 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2213 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2214 0, NULL); 2215 } else { 2216 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2217 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2218 0, NULL); 2219 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2220 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2221 0, NULL); 2222 if (sc->hw.phy.type != e1000_phy_ife) { 2223 ifmedia_add(&sc->media, 2224 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2225 } 2226 } 2227 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2228 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl); 2229 } 2230 2231 /* 2232 * Workaround for SmartSpeed on 82541 and 82547 controllers 2233 */ 2234 static void 2235 emx_smartspeed(struct emx_softc *sc) 2236 { 2237 uint16_t phy_tmp; 2238 2239 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2240 sc->hw.mac.autoneg == 0 || 2241 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2242 return; 2243 2244 if (sc->smartspeed == 0) { 2245 /* 2246 * If Master/Slave config fault is asserted twice, 2247 * we assume back-to-back 2248 */ 2249 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2250 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2251 return; 2252 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2253 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2254 e1000_read_phy_reg(&sc->hw, 2255 PHY_1000T_CTRL, &phy_tmp); 2256 if (phy_tmp & CR_1000T_MS_ENABLE) { 2257 phy_tmp &= ~CR_1000T_MS_ENABLE; 2258 e1000_write_phy_reg(&sc->hw, 2259 PHY_1000T_CTRL, phy_tmp); 2260 sc->smartspeed++; 2261 if (sc->hw.mac.autoneg && 2262 !e1000_phy_setup_autoneg(&sc->hw) && 2263 !e1000_read_phy_reg(&sc->hw, 2264 PHY_CONTROL, &phy_tmp)) { 2265 phy_tmp |= MII_CR_AUTO_NEG_EN | 2266 MII_CR_RESTART_AUTO_NEG; 2267 e1000_write_phy_reg(&sc->hw, 2268 PHY_CONTROL, phy_tmp); 2269 } 2270 } 2271 } 2272 return; 2273 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2274 /* If still no link, perhaps using 2/3 pair cable */ 2275 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2276 phy_tmp |= CR_1000T_MS_ENABLE; 2277 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2278 if (sc->hw.mac.autoneg && 2279 !e1000_phy_setup_autoneg(&sc->hw) && 2280 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2281 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2282 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2283 } 2284 } 2285 2286 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2287 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2288 sc->smartspeed = 0; 2289 } 2290 2291 static int 2292 emx_create_tx_ring(struct emx_txdata *tdata) 2293 { 2294 device_t dev = tdata->sc->dev; 2295 struct emx_txbuf *tx_buffer; 2296 int error, i, tsize, ntxd; 2297 2298 /* 2299 * Validate number of transmit descriptors. It must not exceed 2300 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2301 */ 2302 ntxd = device_getenv_int(dev, "txd", emx_txd); 2303 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2304 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2305 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2306 EMX_DEFAULT_TXD, ntxd); 2307 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2308 } else { 2309 tdata->num_tx_desc = ntxd; 2310 } 2311 2312 /* 2313 * Allocate Transmit Descriptor ring 2314 */ 2315 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2316 EMX_DBA_ALIGN); 2317 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2318 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2319 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2320 &tdata->tx_desc_paddr); 2321 if (tdata->tx_desc_base == NULL) { 2322 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2323 return ENOMEM; 2324 } 2325 2326 tsize = __VM_CACHELINE_ALIGN( 2327 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2328 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO); 2329 2330 /* 2331 * Create DMA tags for tx buffers 2332 */ 2333 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2334 1, 0, /* alignment, bounds */ 2335 BUS_SPACE_MAXADDR, /* lowaddr */ 2336 BUS_SPACE_MAXADDR, /* highaddr */ 2337 NULL, NULL, /* filter, filterarg */ 2338 EMX_TSO_SIZE, /* maxsize */ 2339 EMX_MAX_SCATTER, /* nsegments */ 2340 EMX_MAX_SEGSIZE, /* maxsegsize */ 2341 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2342 BUS_DMA_ONEBPAGE, /* flags */ 2343 &tdata->txtag); 2344 if (error) { 2345 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2346 kfree(tdata->tx_buf, M_DEVBUF); 2347 tdata->tx_buf = NULL; 2348 return error; 2349 } 2350 2351 /* 2352 * Create DMA maps for tx buffers 2353 */ 2354 for (i = 0; i < tdata->num_tx_desc; i++) { 2355 tx_buffer = &tdata->tx_buf[i]; 2356 2357 error = bus_dmamap_create(tdata->txtag, 2358 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2359 &tx_buffer->map); 2360 if (error) { 2361 device_printf(dev, "Unable to create TX DMA map\n"); 2362 emx_destroy_tx_ring(tdata, i); 2363 return error; 2364 } 2365 } 2366 2367 /* 2368 * Setup TX parameters 2369 */ 2370 tdata->spare_tx_desc = EMX_TX_SPARE; 2371 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2372 2373 /* 2374 * Keep following relationship between spare_tx_desc, oact_tx_desc 2375 * and tx_intr_nsegs: 2376 * (spare_tx_desc + EMX_TX_RESERVED) <= 2377 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2378 */ 2379 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2380 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2381 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2382 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2383 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2384 2385 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2386 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2387 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2388 2389 /* 2390 * Pullup extra 4bytes into the first data segment for TSO, see: 2391 * 82571/82572 specification update errata #7 2392 * 2393 * Same applies to I217 (and maybe I218 and I219). 2394 * 2395 * NOTE: 2396 * 4bytes instead of 2bytes, which are mentioned in the errata, 2397 * are pulled; mainly to keep rest of the data properly aligned. 2398 */ 2399 if (tdata->sc->hw.mac.type == e1000_82571 || 2400 tdata->sc->hw.mac.type == e1000_82572 || 2401 tdata->sc->hw.mac.type == e1000_pch_lpt || 2402 tdata->sc->hw.mac.type == e1000_pch_spt) 2403 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2404 2405 return (0); 2406 } 2407 2408 static void 2409 emx_init_tx_ring(struct emx_txdata *tdata) 2410 { 2411 /* Clear the old ring contents */ 2412 bzero(tdata->tx_desc_base, 2413 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2414 2415 /* Reset state */ 2416 tdata->next_avail_tx_desc = 0; 2417 tdata->next_tx_to_clean = 0; 2418 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2419 tdata->tx_nmbuf = 0; 2420 tdata->tx_running = 0; 2421 2422 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2423 if (tdata->sc->tx_ring_inuse > 1) { 2424 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2425 if (bootverbose) { 2426 if_printf(&tdata->sc->arpcom.ac_if, 2427 "TX %d force ctx setup\n", tdata->idx); 2428 } 2429 } 2430 } 2431 2432 static void 2433 emx_init_tx_unit(struct emx_softc *sc) 2434 { 2435 uint32_t tctl, tarc, tipg = 0, txdctl; 2436 int i; 2437 2438 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2439 struct emx_txdata *tdata = &sc->tx_data[i]; 2440 uint64_t bus_addr; 2441 2442 /* Setup the Base and Length of the Tx Descriptor Ring */ 2443 bus_addr = tdata->tx_desc_paddr; 2444 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2445 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2446 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2447 (uint32_t)(bus_addr >> 32)); 2448 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2449 (uint32_t)bus_addr); 2450 /* Setup the HW Tx Head and Tail descriptor pointers */ 2451 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2452 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2453 2454 txdctl = 0x1f; /* PTHRESH */ 2455 txdctl |= 1 << 8; /* HTHRESH */ 2456 txdctl |= 1 << 16; /* WTHRESH */ 2457 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ 2458 txdctl |= E1000_TXDCTL_GRAN; 2459 txdctl |= 1 << 25; /* LWTHRESH */ 2460 2461 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(i), txdctl); 2462 } 2463 2464 /* Set the default values for the Tx Inter Packet Gap timer */ 2465 switch (sc->hw.mac.type) { 2466 case e1000_80003es2lan: 2467 tipg = DEFAULT_82543_TIPG_IPGR1; 2468 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2469 E1000_TIPG_IPGR2_SHIFT; 2470 break; 2471 2472 default: 2473 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2474 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2475 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2476 else 2477 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2478 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2479 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2480 break; 2481 } 2482 2483 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2484 2485 /* NOTE: 0 is not allowed for TIDV */ 2486 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2487 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2488 2489 /* 2490 * Errata workaround (obtained from Linux). This is necessary 2491 * to make multiple TX queues work on 82574. 2492 * XXX can't find it in any published errata though. 2493 */ 2494 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0)); 2495 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl); 2496 2497 if (sc->hw.mac.type == e1000_82571 || 2498 sc->hw.mac.type == e1000_82572) { 2499 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2500 tarc |= EMX_TARC_SPEED_MODE; 2501 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2502 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2503 /* errata: program both queues to unweighted RR */ 2504 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2505 tarc |= 1; 2506 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2507 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2508 tarc |= 1; 2509 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2510 } else if (sc->hw.mac.type == e1000_82574) { 2511 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2512 tarc |= EMX_TARC_ERRATA; 2513 if (sc->tx_ring_inuse > 1) { 2514 tarc |= (EMX_TARC_COMPENSATION_MODE | EMX_TARC_MQ_FIX); 2515 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2516 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2517 } else { 2518 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2519 } 2520 } 2521 2522 /* Program the Transmit Control Register */ 2523 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2524 tctl &= ~E1000_TCTL_CT; 2525 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2526 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2527 tctl |= E1000_TCTL_MULR; 2528 2529 /* This write will effectively turn on the transmit unit. */ 2530 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2531 2532 if (sc->hw.mac.type == e1000_82571 || 2533 sc->hw.mac.type == e1000_82572 || 2534 sc->hw.mac.type == e1000_80003es2lan) { 2535 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2536 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2537 tarc &= ~(1 << 28); 2538 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2539 } else if (sc->hw.mac.type == e1000_pch_spt) { 2540 uint32_t reg; 2541 2542 reg = E1000_READ_REG(&sc->hw, E1000_IOSFPC); 2543 reg |= E1000_RCTL_RDMTS_HEX; 2544 E1000_WRITE_REG(&sc->hw, E1000_IOSFPC, reg); 2545 reg = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2546 reg |= E1000_TARC0_CB_MULTIQ_3_REQ; 2547 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), reg); 2548 } 2549 2550 if (sc->tx_ring_inuse > 1) { 2551 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2552 tarc &= ~EMX_TARC_COUNT_MASK; 2553 tarc |= 1; 2554 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2555 2556 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2557 tarc &= ~EMX_TARC_COUNT_MASK; 2558 tarc |= 1; 2559 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2560 } 2561 } 2562 2563 static void 2564 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc) 2565 { 2566 struct emx_txbuf *tx_buffer; 2567 int i; 2568 2569 /* Free Transmit Descriptor ring */ 2570 if (tdata->tx_desc_base) { 2571 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap); 2572 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base, 2573 tdata->tx_desc_dmap); 2574 bus_dma_tag_destroy(tdata->tx_desc_dtag); 2575 2576 tdata->tx_desc_base = NULL; 2577 } 2578 2579 if (tdata->tx_buf == NULL) 2580 return; 2581 2582 for (i = 0; i < ndesc; i++) { 2583 tx_buffer = &tdata->tx_buf[i]; 2584 2585 KKASSERT(tx_buffer->m_head == NULL); 2586 bus_dmamap_destroy(tdata->txtag, tx_buffer->map); 2587 } 2588 bus_dma_tag_destroy(tdata->txtag); 2589 2590 kfree(tdata->tx_buf, M_DEVBUF); 2591 tdata->tx_buf = NULL; 2592 } 2593 2594 /* 2595 * The offload context needs to be set when we transfer the first 2596 * packet of a particular protocol (TCP/UDP). This routine has been 2597 * enhanced to deal with inserted VLAN headers. 2598 * 2599 * If the new packet's ether header length, ip header length and 2600 * csum offloading type are same as the previous packet, we should 2601 * avoid allocating a new csum context descriptor; mainly to take 2602 * advantage of the pipeline effect of the TX data read request. 2603 * 2604 * This function returns number of TX descrptors allocated for 2605 * csum context. 2606 */ 2607 static int 2608 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp, 2609 uint32_t *txd_upper, uint32_t *txd_lower) 2610 { 2611 struct e1000_context_desc *TXD; 2612 int curr_txd, ehdrlen, csum_flags; 2613 uint32_t cmd, hdr_len, ip_hlen; 2614 2615 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2616 ip_hlen = mp->m_pkthdr.csum_iphlen; 2617 ehdrlen = mp->m_pkthdr.csum_lhlen; 2618 2619 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 2620 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen && 2621 tdata->csum_flags == csum_flags) { 2622 /* 2623 * Same csum offload context as the previous packets; 2624 * just return. 2625 */ 2626 *txd_upper = tdata->csum_txd_upper; 2627 *txd_lower = tdata->csum_txd_lower; 2628 return 0; 2629 } 2630 2631 /* 2632 * Setup a new csum offload context. 2633 */ 2634 2635 curr_txd = tdata->next_avail_tx_desc; 2636 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 2637 2638 cmd = 0; 2639 2640 /* Setup of IP header checksum. */ 2641 if (csum_flags & CSUM_IP) { 2642 /* 2643 * Start offset for header checksum calculation. 2644 * End offset for header checksum calculation. 2645 * Offset of place to put the checksum. 2646 */ 2647 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2648 TXD->lower_setup.ip_fields.ipcse = 2649 htole16(ehdrlen + ip_hlen - 1); 2650 TXD->lower_setup.ip_fields.ipcso = 2651 ehdrlen + offsetof(struct ip, ip_sum); 2652 cmd |= E1000_TXD_CMD_IP; 2653 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2654 } 2655 hdr_len = ehdrlen + ip_hlen; 2656 2657 if (csum_flags & CSUM_TCP) { 2658 /* 2659 * Start offset for payload checksum calculation. 2660 * End offset for payload checksum calculation. 2661 * Offset of place to put the checksum. 2662 */ 2663 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2664 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2665 TXD->upper_setup.tcp_fields.tucso = 2666 hdr_len + offsetof(struct tcphdr, th_sum); 2667 cmd |= E1000_TXD_CMD_TCP; 2668 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2669 } else if (csum_flags & CSUM_UDP) { 2670 /* 2671 * Start offset for header checksum calculation. 2672 * End offset for header checksum calculation. 2673 * Offset of place to put the checksum. 2674 */ 2675 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2676 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2677 TXD->upper_setup.tcp_fields.tucso = 2678 hdr_len + offsetof(struct udphdr, uh_sum); 2679 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2680 } 2681 2682 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2683 E1000_TXD_DTYP_D; /* Data descr */ 2684 2685 /* Save the information for this csum offloading context */ 2686 tdata->csum_lhlen = ehdrlen; 2687 tdata->csum_iphlen = ip_hlen; 2688 tdata->csum_flags = csum_flags; 2689 tdata->csum_txd_upper = *txd_upper; 2690 tdata->csum_txd_lower = *txd_lower; 2691 2692 TXD->tcp_seg_setup.data = htole32(0); 2693 TXD->cmd_and_length = 2694 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2695 2696 if (++curr_txd == tdata->num_tx_desc) 2697 curr_txd = 0; 2698 2699 KKASSERT(tdata->num_tx_desc_avail > 0); 2700 tdata->num_tx_desc_avail--; 2701 2702 tdata->next_avail_tx_desc = curr_txd; 2703 return 1; 2704 } 2705 2706 static void 2707 emx_txeof(struct emx_txdata *tdata) 2708 { 2709 struct emx_txbuf *tx_buffer; 2710 int first, num_avail; 2711 2712 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2713 return; 2714 2715 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2716 return; 2717 2718 num_avail = tdata->num_tx_desc_avail; 2719 first = tdata->next_tx_to_clean; 2720 2721 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2722 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2723 struct e1000_tx_desc *tx_desc; 2724 2725 tx_desc = &tdata->tx_desc_base[dd_idx]; 2726 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2727 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2728 2729 if (++dd_idx == tdata->num_tx_desc) 2730 dd_idx = 0; 2731 2732 while (first != dd_idx) { 2733 logif(pkt_txclean); 2734 2735 KKASSERT(num_avail < tdata->num_tx_desc); 2736 num_avail++; 2737 2738 tx_buffer = &tdata->tx_buf[first]; 2739 if (tx_buffer->m_head) 2740 emx_free_txbuf(tdata, tx_buffer); 2741 2742 if (++first == tdata->num_tx_desc) 2743 first = 0; 2744 } 2745 } else { 2746 break; 2747 } 2748 } 2749 tdata->next_tx_to_clean = first; 2750 tdata->num_tx_desc_avail = num_avail; 2751 2752 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2753 tdata->tx_dd_head = 0; 2754 tdata->tx_dd_tail = 0; 2755 } 2756 2757 if (!EMX_IS_OACTIVE(tdata)) { 2758 ifsq_clr_oactive(tdata->ifsq); 2759 2760 /* All clean, turn off the timer */ 2761 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2762 tdata->tx_watchdog.wd_timer = 0; 2763 } 2764 tdata->tx_running = EMX_TX_RUNNING; 2765 } 2766 2767 static void 2768 emx_tx_collect(struct emx_txdata *tdata, boolean_t gc) 2769 { 2770 struct emx_txbuf *tx_buffer; 2771 int tdh, first, num_avail, dd_idx = -1; 2772 2773 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2774 return; 2775 2776 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx)); 2777 if (tdh == tdata->next_tx_to_clean) { 2778 if (gc && tdata->tx_nmbuf > 0) 2779 tdata->tx_running = EMX_TX_RUNNING; 2780 return; 2781 } 2782 if (gc) 2783 tdata->tx_gc++; 2784 2785 if (tdata->tx_dd_head != tdata->tx_dd_tail) 2786 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2787 2788 num_avail = tdata->num_tx_desc_avail; 2789 first = tdata->next_tx_to_clean; 2790 2791 while (first != tdh) { 2792 logif(pkt_txclean); 2793 2794 KKASSERT(num_avail < tdata->num_tx_desc); 2795 num_avail++; 2796 2797 tx_buffer = &tdata->tx_buf[first]; 2798 if (tx_buffer->m_head) 2799 emx_free_txbuf(tdata, tx_buffer); 2800 2801 if (first == dd_idx) { 2802 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2803 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2804 tdata->tx_dd_head = 0; 2805 tdata->tx_dd_tail = 0; 2806 dd_idx = -1; 2807 } else { 2808 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2809 } 2810 } 2811 2812 if (++first == tdata->num_tx_desc) 2813 first = 0; 2814 } 2815 tdata->next_tx_to_clean = first; 2816 tdata->num_tx_desc_avail = num_avail; 2817 2818 if (!EMX_IS_OACTIVE(tdata)) { 2819 ifsq_clr_oactive(tdata->ifsq); 2820 2821 /* All clean, turn off the timer */ 2822 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2823 tdata->tx_watchdog.wd_timer = 0; 2824 } 2825 if (!gc || tdata->tx_nmbuf > 0) 2826 tdata->tx_running = EMX_TX_RUNNING; 2827 } 2828 2829 /* 2830 * When Link is lost sometimes there is work still in the TX ring 2831 * which will result in a watchdog, rather than allow that do an 2832 * attempted cleanup and then reinit here. Note that this has been 2833 * seens mostly with fiber adapters. 2834 */ 2835 static void 2836 emx_tx_purge(struct emx_softc *sc) 2837 { 2838 int i; 2839 2840 if (sc->link_active) 2841 return; 2842 2843 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2844 struct emx_txdata *tdata = &sc->tx_data[i]; 2845 2846 if (tdata->tx_watchdog.wd_timer) { 2847 emx_tx_collect(tdata, FALSE); 2848 if (tdata->tx_watchdog.wd_timer) { 2849 if_printf(&sc->arpcom.ac_if, 2850 "Link lost, TX pending, reinit\n"); 2851 emx_init(sc); 2852 return; 2853 } 2854 } 2855 } 2856 } 2857 2858 static int 2859 emx_newbuf(struct emx_rxdata *rdata, int i, int init) 2860 { 2861 struct mbuf *m; 2862 bus_dma_segment_t seg; 2863 bus_dmamap_t map; 2864 struct emx_rxbuf *rx_buffer; 2865 int error, nseg; 2866 2867 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2868 if (m == NULL) { 2869 if (init) { 2870 if_printf(&rdata->sc->arpcom.ac_if, 2871 "Unable to allocate RX mbuf\n"); 2872 } 2873 return (ENOBUFS); 2874 } 2875 m->m_len = m->m_pkthdr.len = MCLBYTES; 2876 2877 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 2878 m_adj(m, ETHER_ALIGN); 2879 2880 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2881 rdata->rx_sparemap, m, 2882 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2883 if (error) { 2884 m_freem(m); 2885 if (init) { 2886 if_printf(&rdata->sc->arpcom.ac_if, 2887 "Unable to load RX mbuf\n"); 2888 } 2889 return (error); 2890 } 2891 2892 rx_buffer = &rdata->rx_buf[i]; 2893 if (rx_buffer->m_head != NULL) 2894 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2895 2896 map = rx_buffer->map; 2897 rx_buffer->map = rdata->rx_sparemap; 2898 rdata->rx_sparemap = map; 2899 2900 rx_buffer->m_head = m; 2901 rx_buffer->paddr = seg.ds_addr; 2902 2903 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2904 return (0); 2905 } 2906 2907 static int 2908 emx_create_rx_ring(struct emx_rxdata *rdata) 2909 { 2910 device_t dev = rdata->sc->dev; 2911 struct emx_rxbuf *rx_buffer; 2912 int i, error, rsize, nrxd; 2913 2914 /* 2915 * Validate number of receive descriptors. It must not exceed 2916 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2917 */ 2918 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2919 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2920 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2921 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2922 EMX_DEFAULT_RXD, nrxd); 2923 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2924 } else { 2925 rdata->num_rx_desc = nrxd; 2926 } 2927 2928 /* 2929 * Allocate Receive Descriptor ring 2930 */ 2931 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2932 EMX_DBA_ALIGN); 2933 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2934 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2935 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2936 &rdata->rx_desc_paddr); 2937 if (rdata->rx_desc == NULL) { 2938 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2939 return ENOMEM; 2940 } 2941 2942 rsize = __VM_CACHELINE_ALIGN( 2943 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2944 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2945 2946 /* 2947 * Create DMA tag for rx buffers 2948 */ 2949 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2950 1, 0, /* alignment, bounds */ 2951 BUS_SPACE_MAXADDR, /* lowaddr */ 2952 BUS_SPACE_MAXADDR, /* highaddr */ 2953 NULL, NULL, /* filter, filterarg */ 2954 MCLBYTES, /* maxsize */ 2955 1, /* nsegments */ 2956 MCLBYTES, /* maxsegsize */ 2957 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2958 &rdata->rxtag); 2959 if (error) { 2960 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2961 kfree(rdata->rx_buf, M_DEVBUF); 2962 rdata->rx_buf = NULL; 2963 return error; 2964 } 2965 2966 /* 2967 * Create spare DMA map for rx buffers 2968 */ 2969 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2970 &rdata->rx_sparemap); 2971 if (error) { 2972 device_printf(dev, "Unable to create spare RX DMA map\n"); 2973 bus_dma_tag_destroy(rdata->rxtag); 2974 kfree(rdata->rx_buf, M_DEVBUF); 2975 rdata->rx_buf = NULL; 2976 return error; 2977 } 2978 2979 /* 2980 * Create DMA maps for rx buffers 2981 */ 2982 for (i = 0; i < rdata->num_rx_desc; i++) { 2983 rx_buffer = &rdata->rx_buf[i]; 2984 2985 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2986 &rx_buffer->map); 2987 if (error) { 2988 device_printf(dev, "Unable to create RX DMA map\n"); 2989 emx_destroy_rx_ring(rdata, i); 2990 return error; 2991 } 2992 } 2993 return (0); 2994 } 2995 2996 static void 2997 emx_free_rx_ring(struct emx_rxdata *rdata) 2998 { 2999 int i; 3000 3001 for (i = 0; i < rdata->num_rx_desc; i++) { 3002 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 3003 3004 if (rx_buffer->m_head != NULL) { 3005 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 3006 m_freem(rx_buffer->m_head); 3007 rx_buffer->m_head = NULL; 3008 } 3009 } 3010 3011 if (rdata->fmp != NULL) 3012 m_freem(rdata->fmp); 3013 rdata->fmp = NULL; 3014 rdata->lmp = NULL; 3015 } 3016 3017 static void 3018 emx_free_tx_ring(struct emx_txdata *tdata) 3019 { 3020 int i; 3021 3022 for (i = 0; i < tdata->num_tx_desc; i++) { 3023 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i]; 3024 3025 if (tx_buffer->m_head != NULL) 3026 emx_free_txbuf(tdata, tx_buffer); 3027 } 3028 3029 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX; 3030 3031 tdata->csum_flags = 0; 3032 tdata->csum_lhlen = 0; 3033 tdata->csum_iphlen = 0; 3034 tdata->csum_thlen = 0; 3035 tdata->csum_mss = 0; 3036 tdata->csum_pktlen = 0; 3037 3038 tdata->tx_dd_head = 0; 3039 tdata->tx_dd_tail = 0; 3040 tdata->tx_nsegs = 0; 3041 } 3042 3043 static int 3044 emx_init_rx_ring(struct emx_rxdata *rdata) 3045 { 3046 int i, error; 3047 3048 /* Reset descriptor ring */ 3049 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 3050 3051 /* Allocate new ones. */ 3052 for (i = 0; i < rdata->num_rx_desc; i++) { 3053 error = emx_newbuf(rdata, i, 1); 3054 if (error) 3055 return (error); 3056 } 3057 3058 /* Setup our descriptor pointers */ 3059 rdata->next_rx_desc_to_check = 0; 3060 3061 return (0); 3062 } 3063 3064 static void 3065 emx_init_rx_unit(struct emx_softc *sc) 3066 { 3067 struct ifnet *ifp = &sc->arpcom.ac_if; 3068 uint64_t bus_addr; 3069 uint32_t rctl, itr, rfctl, rxcsum; 3070 int i; 3071 3072 /* 3073 * Make sure receives are disabled while setting 3074 * up the descriptor ring 3075 */ 3076 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 3077 /* Do not disable if ever enabled on this hardware */ 3078 if (sc->hw.mac.type != e1000_82574) 3079 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 3080 3081 /* 3082 * Set the interrupt throttling rate. Value is calculated 3083 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 3084 */ 3085 if (sc->int_throttle_ceil) 3086 itr = 1000000000 / 256 / sc->int_throttle_ceil; 3087 else 3088 itr = 0; 3089 emx_set_itr(sc, itr); 3090 3091 /* Use extended RX descriptor */ 3092 rfctl = E1000_READ_REG(&sc->hw, E1000_RFCTL); 3093 rfctl |= E1000_RFCTL_EXTEN; 3094 /* Disable accelerated ackknowledge */ 3095 if (sc->hw.mac.type == e1000_82574) 3096 rfctl |= E1000_RFCTL_ACK_DIS; 3097 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 3098 3099 /* 3100 * Receive Checksum Offload for TCP and UDP 3101 * 3102 * Checksum offloading is also enabled if multiple receive 3103 * queue is to be supported, since we need it to figure out 3104 * packet type. 3105 */ 3106 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 3107 if ((ifp->if_capenable & IFCAP_RXCSUM) || 3108 sc->rx_ring_cnt > 1) { 3109 /* 3110 * NOTE: 3111 * PCSD must be enabled to enable multiple 3112 * receive queues. 3113 */ 3114 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 3115 E1000_RXCSUM_PCSD; 3116 } else { 3117 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 3118 E1000_RXCSUM_PCSD); 3119 } 3120 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 3121 3122 /* 3123 * Configure multiple receive queue (RSS) 3124 */ 3125 if (sc->rx_ring_cnt > 1) { 3126 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE]; 3127 int r, j; 3128 3129 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING, 3130 ("invalid number of RX ring (%d)", sc->rx_ring_cnt)); 3131 3132 /* 3133 * NOTE: 3134 * When we reach here, RSS has already been disabled 3135 * in emx_stop(), so we could safely configure RSS key 3136 * and redirect table. 3137 */ 3138 3139 /* 3140 * Configure RSS key 3141 */ 3142 toeplitz_get_key(key, sizeof(key)); 3143 for (i = 0; i < EMX_NRSSRK; ++i) { 3144 uint32_t rssrk; 3145 3146 rssrk = EMX_RSSRK_VAL(key, i); 3147 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 3148 3149 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk); 3150 } 3151 3152 /* 3153 * Configure RSS redirect table. 3154 */ 3155 if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table, 3156 EMX_RDRTABLE_SIZE); 3157 3158 r = 0; 3159 for (j = 0; j < EMX_NRETA; ++j) { 3160 uint32_t reta = 0; 3161 3162 for (i = 0; i < EMX_RETA_SIZE; ++i) { 3163 uint32_t q; 3164 3165 q = sc->rdr_table[r] << EMX_RETA_RINGIDX_SHIFT; 3166 reta |= q << (8 * i); 3167 ++r; 3168 } 3169 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 3170 E1000_WRITE_REG(&sc->hw, E1000_RETA(j), reta); 3171 } 3172 3173 /* 3174 * Enable multiple receive queues. 3175 * Enable IPv4 RSS standard hash functions. 3176 * Disable RSS interrupt. 3177 */ 3178 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 3179 E1000_MRQC_ENABLE_RSS_2Q | 3180 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3181 E1000_MRQC_RSS_FIELD_IPV4); 3182 } 3183 3184 /* 3185 * XXX TEMPORARY WORKAROUND: on some systems with 82573 3186 * long latencies are observed, like Lenovo X60. This 3187 * change eliminates the problem, but since having positive 3188 * values in RDTR is a known source of problems on other 3189 * platforms another solution is being sought. 3190 */ 3191 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 3192 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 3193 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 3194 } 3195 3196 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3197 struct emx_rxdata *rdata = &sc->rx_data[i]; 3198 3199 /* 3200 * Setup the Base and Length of the Rx Descriptor Ring 3201 */ 3202 bus_addr = rdata->rx_desc_paddr; 3203 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 3204 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 3205 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 3206 (uint32_t)(bus_addr >> 32)); 3207 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 3208 (uint32_t)bus_addr); 3209 3210 /* 3211 * Setup the HW Rx Head and Tail Descriptor Pointers 3212 */ 3213 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 3214 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 3215 sc->rx_data[i].num_rx_desc - 1); 3216 } 3217 3218 /* Set PTHRESH for improved jumbo performance */ 3219 if (ifp->if_mtu > ETHERMTU && sc->hw.mac.type == e1000_82574) { 3220 uint32_t rxdctl; 3221 3222 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3223 rxdctl = E1000_READ_REG(&sc->hw, E1000_RXDCTL(i)); 3224 rxdctl |= 0x20; /* PTHRESH */ 3225 rxdctl |= 4 << 8; /* HTHRESH */ 3226 rxdctl |= 4 << 16; /* WTHRESH */ 3227 rxdctl |= 1 << 24; /* Switch to granularity */ 3228 E1000_WRITE_REG(&sc->hw, E1000_RXDCTL(i), rxdctl); 3229 } 3230 } 3231 3232 if (sc->hw.mac.type >= e1000_pch2lan) { 3233 if (ifp->if_mtu > ETHERMTU) 3234 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE); 3235 else 3236 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE); 3237 } 3238 3239 /* Setup the Receive Control Register */ 3240 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3241 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3242 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 3243 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3244 3245 /* Make sure VLAN Filters are off */ 3246 rctl &= ~E1000_RCTL_VFE; 3247 3248 /* Don't store bad paket */ 3249 rctl &= ~E1000_RCTL_SBP; 3250 3251 /* MCLBYTES */ 3252 rctl |= E1000_RCTL_SZ_2048; 3253 3254 if (ifp->if_mtu > ETHERMTU) 3255 rctl |= E1000_RCTL_LPE; 3256 else 3257 rctl &= ~E1000_RCTL_LPE; 3258 3259 /* Enable Receives */ 3260 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 3261 } 3262 3263 static void 3264 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc) 3265 { 3266 struct emx_rxbuf *rx_buffer; 3267 int i; 3268 3269 /* Free Receive Descriptor ring */ 3270 if (rdata->rx_desc) { 3271 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 3272 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 3273 rdata->rx_desc_dmap); 3274 bus_dma_tag_destroy(rdata->rx_desc_dtag); 3275 3276 rdata->rx_desc = NULL; 3277 } 3278 3279 if (rdata->rx_buf == NULL) 3280 return; 3281 3282 for (i = 0; i < ndesc; i++) { 3283 rx_buffer = &rdata->rx_buf[i]; 3284 3285 KKASSERT(rx_buffer->m_head == NULL); 3286 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 3287 } 3288 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 3289 bus_dma_tag_destroy(rdata->rxtag); 3290 3291 kfree(rdata->rx_buf, M_DEVBUF); 3292 rdata->rx_buf = NULL; 3293 } 3294 3295 static void 3296 emx_rxeof(struct emx_rxdata *rdata, int count) 3297 { 3298 struct ifnet *ifp = &rdata->sc->arpcom.ac_if; 3299 uint32_t staterr; 3300 emx_rxdesc_t *current_desc; 3301 struct mbuf *mp; 3302 int i, cpuid = mycpuid; 3303 3304 i = rdata->next_rx_desc_to_check; 3305 current_desc = &rdata->rx_desc[i]; 3306 staterr = le32toh(current_desc->rxd_staterr); 3307 3308 if (!(staterr & E1000_RXD_STAT_DD)) 3309 return; 3310 3311 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 3312 struct pktinfo *pi = NULL, pi0; 3313 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 3314 struct mbuf *m = NULL; 3315 int eop, len; 3316 3317 logif(pkt_receive); 3318 3319 mp = rx_buf->m_head; 3320 3321 /* 3322 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3323 * needs to access the last received byte in the mbuf. 3324 */ 3325 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 3326 BUS_DMASYNC_POSTREAD); 3327 3328 len = le16toh(current_desc->rxd_length); 3329 if (staterr & E1000_RXD_STAT_EOP) { 3330 count--; 3331 eop = 1; 3332 } else { 3333 eop = 0; 3334 } 3335 3336 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3337 uint16_t vlan = 0; 3338 uint32_t mrq, rss_hash; 3339 3340 /* 3341 * Save several necessary information, 3342 * before emx_newbuf() destroy it. 3343 */ 3344 if ((staterr & E1000_RXD_STAT_VP) && eop) 3345 vlan = le16toh(current_desc->rxd_vlan); 3346 3347 mrq = le32toh(current_desc->rxd_mrq); 3348 rss_hash = le32toh(current_desc->rxd_rss); 3349 3350 EMX_RSS_DPRINTF(rdata->sc, 10, 3351 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 3352 rdata->idx, mrq, rss_hash); 3353 3354 if (emx_newbuf(rdata, i, 0) != 0) { 3355 IFNET_STAT_INC(ifp, iqdrops, 1); 3356 goto discard; 3357 } 3358 3359 /* Assign correct length to the current fragment */ 3360 mp->m_len = len; 3361 3362 if (rdata->fmp == NULL) { 3363 mp->m_pkthdr.len = len; 3364 rdata->fmp = mp; /* Store the first mbuf */ 3365 rdata->lmp = mp; 3366 } else { 3367 /* 3368 * Chain mbuf's together 3369 */ 3370 rdata->lmp->m_next = mp; 3371 rdata->lmp = rdata->lmp->m_next; 3372 rdata->fmp->m_pkthdr.len += len; 3373 } 3374 3375 if (eop) { 3376 rdata->fmp->m_pkthdr.rcvif = ifp; 3377 IFNET_STAT_INC(ifp, ipackets, 1); 3378 3379 if (ifp->if_capenable & IFCAP_RXCSUM) 3380 emx_rxcsum(staterr, rdata->fmp); 3381 3382 if (staterr & E1000_RXD_STAT_VP) { 3383 rdata->fmp->m_pkthdr.ether_vlantag = 3384 vlan; 3385 rdata->fmp->m_flags |= M_VLANTAG; 3386 } 3387 m = rdata->fmp; 3388 rdata->fmp = NULL; 3389 rdata->lmp = NULL; 3390 3391 if (ifp->if_capenable & IFCAP_RSS) { 3392 pi = emx_rssinfo(m, &pi0, mrq, 3393 rss_hash, staterr); 3394 } 3395 #ifdef EMX_RSS_DEBUG 3396 rdata->rx_pkts++; 3397 #endif 3398 } 3399 } else { 3400 IFNET_STAT_INC(ifp, ierrors, 1); 3401 discard: 3402 emx_setup_rxdesc(current_desc, rx_buf); 3403 if (rdata->fmp != NULL) { 3404 m_freem(rdata->fmp); 3405 rdata->fmp = NULL; 3406 rdata->lmp = NULL; 3407 } 3408 m = NULL; 3409 } 3410 3411 if (m != NULL) 3412 ifp->if_input(ifp, m, pi, cpuid); 3413 3414 /* Advance our pointers to the next descriptor. */ 3415 if (++i == rdata->num_rx_desc) 3416 i = 0; 3417 3418 current_desc = &rdata->rx_desc[i]; 3419 staterr = le32toh(current_desc->rxd_staterr); 3420 } 3421 rdata->next_rx_desc_to_check = i; 3422 3423 /* Advance the E1000's Receive Queue "Tail Pointer". */ 3424 if (--i < 0) 3425 i = rdata->num_rx_desc - 1; 3426 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3427 } 3428 3429 static void 3430 emx_enable_intr(struct emx_softc *sc) 3431 { 3432 uint32_t ims_mask = IMS_ENABLE_MASK; 3433 3434 lwkt_serialize_handler_enable(&sc->main_serialize); 3435 3436 #if 0 3437 if (sc->hw.mac.type == e1000_82574) { 3438 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3439 ims_mask |= EM_MSIX_MASK; 3440 } 3441 #endif 3442 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3443 } 3444 3445 static void 3446 emx_disable_intr(struct emx_softc *sc) 3447 { 3448 if (sc->hw.mac.type == e1000_82574) 3449 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3450 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3451 3452 lwkt_serialize_handler_disable(&sc->main_serialize); 3453 } 3454 3455 /* 3456 * Bit of a misnomer, what this really means is 3457 * to enable OS management of the system... aka 3458 * to disable special hardware management features 3459 */ 3460 static void 3461 emx_get_mgmt(struct emx_softc *sc) 3462 { 3463 /* A shared code workaround */ 3464 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3465 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3466 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3467 3468 /* disable hardware interception of ARP */ 3469 manc &= ~(E1000_MANC_ARP_EN); 3470 3471 /* enable receiving management packets to the host */ 3472 manc |= E1000_MANC_EN_MNG2HOST; 3473 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3474 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3475 manc2h |= E1000_MNG2HOST_PORT_623; 3476 manc2h |= E1000_MNG2HOST_PORT_664; 3477 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3478 3479 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3480 } 3481 } 3482 3483 /* 3484 * Give control back to hardware management 3485 * controller if there is one. 3486 */ 3487 static void 3488 emx_rel_mgmt(struct emx_softc *sc) 3489 { 3490 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3491 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3492 3493 /* re-enable hardware interception of ARP */ 3494 manc |= E1000_MANC_ARP_EN; 3495 manc &= ~E1000_MANC_EN_MNG2HOST; 3496 3497 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3498 } 3499 } 3500 3501 /* 3502 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3503 * For ASF and Pass Through versions of f/w this means that 3504 * the driver is loaded. For AMT version (only with 82573) 3505 * of the f/w this means that the network i/f is open. 3506 */ 3507 static void 3508 emx_get_hw_control(struct emx_softc *sc) 3509 { 3510 /* Let firmware know the driver has taken over */ 3511 if (sc->hw.mac.type == e1000_82573) { 3512 uint32_t swsm; 3513 3514 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3515 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3516 swsm | E1000_SWSM_DRV_LOAD); 3517 } else { 3518 uint32_t ctrl_ext; 3519 3520 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3521 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3522 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3523 } 3524 sc->flags |= EMX_FLAG_HW_CTRL; 3525 } 3526 3527 /* 3528 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3529 * For ASF and Pass Through versions of f/w this means that the 3530 * driver is no longer loaded. For AMT version (only with 82573) 3531 * of the f/w this means that the network i/f is closed. 3532 */ 3533 static void 3534 emx_rel_hw_control(struct emx_softc *sc) 3535 { 3536 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0) 3537 return; 3538 sc->flags &= ~EMX_FLAG_HW_CTRL; 3539 3540 /* Let firmware taken over control of h/w */ 3541 if (sc->hw.mac.type == e1000_82573) { 3542 uint32_t swsm; 3543 3544 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3545 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3546 swsm & ~E1000_SWSM_DRV_LOAD); 3547 } else { 3548 uint32_t ctrl_ext; 3549 3550 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3551 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3552 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3553 } 3554 } 3555 3556 static int 3557 emx_is_valid_eaddr(const uint8_t *addr) 3558 { 3559 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3560 3561 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3562 return (FALSE); 3563 3564 return (TRUE); 3565 } 3566 3567 /* 3568 * Enable PCI Wake On Lan capability 3569 */ 3570 static void 3571 emx_enable_wol(device_t dev) 3572 { 3573 uint16_t cap, status; 3574 uint8_t id; 3575 3576 /* First find the capabilities pointer*/ 3577 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3578 3579 /* Read the PM Capabilities */ 3580 id = pci_read_config(dev, cap, 1); 3581 if (id != PCIY_PMG) /* Something wrong */ 3582 return; 3583 3584 /* 3585 * OK, we have the power capabilities, 3586 * so now get the status register 3587 */ 3588 cap += PCIR_POWER_STATUS; 3589 status = pci_read_config(dev, cap, 2); 3590 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3591 pci_write_config(dev, cap, status, 2); 3592 } 3593 3594 static void 3595 emx_update_stats(struct emx_softc *sc) 3596 { 3597 struct ifnet *ifp = &sc->arpcom.ac_if; 3598 3599 if (sc->hw.phy.media_type == e1000_media_type_copper || 3600 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3601 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3602 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3603 } 3604 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3605 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3606 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3607 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3608 3609 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3610 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3611 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3612 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3613 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3614 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3615 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3616 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3617 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3618 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3619 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3620 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3621 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3622 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3623 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3624 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3625 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3626 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3627 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3628 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3629 3630 /* For the 64-bit byte counters the low dword must be read first. */ 3631 /* Both registers clear on the read of the high dword */ 3632 3633 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3634 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3635 3636 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3637 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3638 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3639 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3640 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3641 3642 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3643 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3644 3645 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3646 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3647 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3648 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3649 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3650 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3651 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3652 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3653 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3654 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3655 3656 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3657 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3658 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3659 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3660 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3661 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3662 3663 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3664 3665 /* Rx Errors */ 3666 IFNET_STAT_SET(ifp, ierrors, 3667 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3668 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3669 3670 /* Tx Errors */ 3671 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3672 } 3673 3674 static void 3675 emx_print_debug_info(struct emx_softc *sc) 3676 { 3677 device_t dev = sc->dev; 3678 uint8_t *hw_addr = sc->hw.hw_addr; 3679 int i; 3680 3681 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3682 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3683 E1000_READ_REG(&sc->hw, E1000_CTRL), 3684 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3685 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3686 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3687 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3688 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3689 sc->hw.fc.high_water, sc->hw.fc.low_water); 3690 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3691 E1000_READ_REG(&sc->hw, E1000_TIDV), 3692 E1000_READ_REG(&sc->hw, E1000_TADV)); 3693 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3694 E1000_READ_REG(&sc->hw, E1000_RDTR), 3695 E1000_READ_REG(&sc->hw, E1000_RADV)); 3696 3697 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3698 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3699 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3700 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3701 } 3702 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3703 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3704 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3705 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3706 } 3707 3708 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3709 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3710 sc->tx_data[i].num_tx_desc_avail); 3711 device_printf(dev, "TX %d TSO segments = %lu\n", i, 3712 sc->tx_data[i].tso_segments); 3713 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3714 sc->tx_data[i].tso_ctx_reused); 3715 } 3716 } 3717 3718 static void 3719 emx_print_hw_stats(struct emx_softc *sc) 3720 { 3721 device_t dev = sc->dev; 3722 3723 device_printf(dev, "Excessive collisions = %lld\n", 3724 (long long)sc->stats.ecol); 3725 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3726 device_printf(dev, "Symbol errors = %lld\n", 3727 (long long)sc->stats.symerrs); 3728 #endif 3729 device_printf(dev, "Sequence errors = %lld\n", 3730 (long long)sc->stats.sec); 3731 device_printf(dev, "Defer count = %lld\n", 3732 (long long)sc->stats.dc); 3733 device_printf(dev, "Missed Packets = %lld\n", 3734 (long long)sc->stats.mpc); 3735 device_printf(dev, "Receive No Buffers = %lld\n", 3736 (long long)sc->stats.rnbc); 3737 /* RLEC is inaccurate on some hardware, calculate our own. */ 3738 device_printf(dev, "Receive Length Errors = %lld\n", 3739 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3740 device_printf(dev, "Receive errors = %lld\n", 3741 (long long)sc->stats.rxerrc); 3742 device_printf(dev, "Crc errors = %lld\n", 3743 (long long)sc->stats.crcerrs); 3744 device_printf(dev, "Alignment errors = %lld\n", 3745 (long long)sc->stats.algnerrc); 3746 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3747 (long long)sc->stats.cexterr); 3748 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3749 device_printf(dev, "XON Rcvd = %lld\n", 3750 (long long)sc->stats.xonrxc); 3751 device_printf(dev, "XON Xmtd = %lld\n", 3752 (long long)sc->stats.xontxc); 3753 device_printf(dev, "XOFF Rcvd = %lld\n", 3754 (long long)sc->stats.xoffrxc); 3755 device_printf(dev, "XOFF Xmtd = %lld\n", 3756 (long long)sc->stats.xofftxc); 3757 device_printf(dev, "Good Packets Rcvd = %lld\n", 3758 (long long)sc->stats.gprc); 3759 device_printf(dev, "Good Packets Xmtd = %lld\n", 3760 (long long)sc->stats.gptc); 3761 } 3762 3763 static void 3764 emx_print_nvm_info(struct emx_softc *sc) 3765 { 3766 uint16_t eeprom_data; 3767 int i, j, row = 0; 3768 3769 /* Its a bit crude, but it gets the job done */ 3770 kprintf("\nInterface EEPROM Dump:\n"); 3771 kprintf("Offset\n0x0000 "); 3772 for (i = 0, j = 0; i < 32; i++, j++) { 3773 if (j == 8) { /* Make the offset block */ 3774 j = 0; ++row; 3775 kprintf("\n0x00%x0 ",row); 3776 } 3777 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3778 kprintf("%04x ", eeprom_data); 3779 } 3780 kprintf("\n"); 3781 } 3782 3783 static int 3784 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3785 { 3786 struct emx_softc *sc; 3787 struct ifnet *ifp; 3788 int error, result; 3789 3790 result = -1; 3791 error = sysctl_handle_int(oidp, &result, 0, req); 3792 if (error || !req->newptr) 3793 return (error); 3794 3795 sc = (struct emx_softc *)arg1; 3796 ifp = &sc->arpcom.ac_if; 3797 3798 ifnet_serialize_all(ifp); 3799 3800 if (result == 1) 3801 emx_print_debug_info(sc); 3802 3803 /* 3804 * This value will cause a hex dump of the 3805 * first 32 16-bit words of the EEPROM to 3806 * the screen. 3807 */ 3808 if (result == 2) 3809 emx_print_nvm_info(sc); 3810 3811 ifnet_deserialize_all(ifp); 3812 3813 return (error); 3814 } 3815 3816 static int 3817 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3818 { 3819 int error, result; 3820 3821 result = -1; 3822 error = sysctl_handle_int(oidp, &result, 0, req); 3823 if (error || !req->newptr) 3824 return (error); 3825 3826 if (result == 1) { 3827 struct emx_softc *sc = (struct emx_softc *)arg1; 3828 struct ifnet *ifp = &sc->arpcom.ac_if; 3829 3830 ifnet_serialize_all(ifp); 3831 emx_print_hw_stats(sc); 3832 ifnet_deserialize_all(ifp); 3833 } 3834 return (error); 3835 } 3836 3837 static void 3838 emx_add_sysctl(struct emx_softc *sc) 3839 { 3840 struct sysctl_ctx_list *ctx; 3841 struct sysctl_oid *tree; 3842 char pkt_desc[32]; 3843 int i; 3844 3845 ctx = device_get_sysctl_ctx(sc->dev); 3846 tree = device_get_sysctl_tree(sc->dev); 3847 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3848 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3849 emx_sysctl_debug_info, "I", "Debug Information"); 3850 3851 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3852 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3853 emx_sysctl_stats, "I", "Statistics"); 3854 3855 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3856 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0, 3857 "# of RX descs"); 3858 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3859 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0, 3860 "# of TX descs"); 3861 3862 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3863 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3864 emx_sysctl_int_throttle, "I", "interrupt throttling rate"); 3865 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3866 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3867 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt"); 3868 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3869 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3870 emx_sysctl_tx_wreg_nsegs, "I", 3871 "# segments sent before write to hardware register"); 3872 3873 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3874 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0, 3875 "# of RX rings"); 3876 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3877 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0, 3878 "# of TX rings"); 3879 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3880 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0, 3881 "# of TX rings used"); 3882 3883 #ifdef IFPOLL_ENABLE 3884 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3885 OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 3886 sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 3887 "TX polling CPU map"); 3888 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3889 OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 3890 sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 3891 "RX polling CPU map"); 3892 #endif 3893 3894 #ifdef EMX_RSS_DEBUG 3895 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3896 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3897 0, "RSS debug level"); 3898 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3899 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i); 3900 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3901 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts, 3902 "RXed packets"); 3903 } 3904 #endif 3905 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3906 #ifdef EMX_TSS_DEBUG 3907 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i); 3908 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3909 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts, 3910 "TXed packets"); 3911 #endif 3912 3913 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_nmbuf", i); 3914 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3915 pkt_desc, CTLFLAG_RD, &sc->tx_data[i].tx_nmbuf, 0, 3916 "# of pending TX mbufs"); 3917 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_gc", i); 3918 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3919 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_gc, 3920 "# of TX desc GC"); 3921 } 3922 } 3923 3924 static int 3925 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3926 { 3927 struct emx_softc *sc = (void *)arg1; 3928 struct ifnet *ifp = &sc->arpcom.ac_if; 3929 int error, throttle; 3930 3931 throttle = sc->int_throttle_ceil; 3932 error = sysctl_handle_int(oidp, &throttle, 0, req); 3933 if (error || req->newptr == NULL) 3934 return error; 3935 if (throttle < 0 || throttle > 1000000000 / 256) 3936 return EINVAL; 3937 3938 if (throttle) { 3939 /* 3940 * Set the interrupt throttling rate in 256ns increments, 3941 * recalculate sysctl value assignment to get exact frequency. 3942 */ 3943 throttle = 1000000000 / 256 / throttle; 3944 3945 /* Upper 16bits of ITR is reserved and should be zero */ 3946 if (throttle & 0xffff0000) 3947 return EINVAL; 3948 } 3949 3950 ifnet_serialize_all(ifp); 3951 3952 if (throttle) 3953 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3954 else 3955 sc->int_throttle_ceil = 0; 3956 3957 if (ifp->if_flags & IFF_RUNNING) 3958 emx_set_itr(sc, throttle); 3959 3960 ifnet_deserialize_all(ifp); 3961 3962 if (bootverbose) { 3963 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3964 sc->int_throttle_ceil); 3965 } 3966 return 0; 3967 } 3968 3969 static int 3970 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3971 { 3972 struct emx_softc *sc = (void *)arg1; 3973 struct ifnet *ifp = &sc->arpcom.ac_if; 3974 struct emx_txdata *tdata = &sc->tx_data[0]; 3975 int error, segs; 3976 3977 segs = tdata->tx_intr_nsegs; 3978 error = sysctl_handle_int(oidp, &segs, 0, req); 3979 if (error || req->newptr == NULL) 3980 return error; 3981 if (segs <= 0) 3982 return EINVAL; 3983 3984 ifnet_serialize_all(ifp); 3985 3986 /* 3987 * Don't allow tx_intr_nsegs to become: 3988 * o Less the oact_tx_desc 3989 * o Too large that no TX desc will cause TX interrupt to 3990 * be generated (OACTIVE will never recover) 3991 * o Too small that will cause tx_dd[] overflow 3992 */ 3993 if (segs < tdata->oact_tx_desc || 3994 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 3995 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 3996 error = EINVAL; 3997 } else { 3998 int i; 3999 4000 error = 0; 4001 for (i = 0; i < sc->tx_ring_cnt; ++i) 4002 sc->tx_data[i].tx_intr_nsegs = segs; 4003 } 4004 4005 ifnet_deserialize_all(ifp); 4006 4007 return error; 4008 } 4009 4010 static int 4011 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 4012 { 4013 struct emx_softc *sc = (void *)arg1; 4014 struct ifnet *ifp = &sc->arpcom.ac_if; 4015 int error, nsegs, i; 4016 4017 nsegs = sc->tx_data[0].tx_wreg_nsegs; 4018 error = sysctl_handle_int(oidp, &nsegs, 0, req); 4019 if (error || req->newptr == NULL) 4020 return error; 4021 4022 ifnet_serialize_all(ifp); 4023 for (i = 0; i < sc->tx_ring_cnt; ++i) 4024 sc->tx_data[i].tx_wreg_nsegs =nsegs; 4025 ifnet_deserialize_all(ifp); 4026 4027 return 0; 4028 } 4029 4030 static int 4031 emx_dma_alloc(struct emx_softc *sc) 4032 { 4033 int error, i; 4034 4035 /* 4036 * Create top level busdma tag 4037 */ 4038 error = bus_dma_tag_create(NULL, 1, 0, 4039 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4040 NULL, NULL, 4041 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 4042 0, &sc->parent_dtag); 4043 if (error) { 4044 device_printf(sc->dev, "could not create top level DMA tag\n"); 4045 return error; 4046 } 4047 4048 /* 4049 * Allocate transmit descriptors ring and buffers 4050 */ 4051 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4052 error = emx_create_tx_ring(&sc->tx_data[i]); 4053 if (error) { 4054 device_printf(sc->dev, 4055 "Could not setup transmit structures\n"); 4056 return error; 4057 } 4058 } 4059 4060 /* 4061 * Allocate receive descriptors ring and buffers 4062 */ 4063 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4064 error = emx_create_rx_ring(&sc->rx_data[i]); 4065 if (error) { 4066 device_printf(sc->dev, 4067 "Could not setup receive structures\n"); 4068 return error; 4069 } 4070 } 4071 return 0; 4072 } 4073 4074 static void 4075 emx_dma_free(struct emx_softc *sc) 4076 { 4077 int i; 4078 4079 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4080 emx_destroy_tx_ring(&sc->tx_data[i], 4081 sc->tx_data[i].num_tx_desc); 4082 } 4083 4084 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4085 emx_destroy_rx_ring(&sc->rx_data[i], 4086 sc->rx_data[i].num_rx_desc); 4087 } 4088 4089 /* Free top level busdma tag */ 4090 if (sc->parent_dtag != NULL) 4091 bus_dma_tag_destroy(sc->parent_dtag); 4092 } 4093 4094 static void 4095 emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 4096 { 4097 struct emx_softc *sc = ifp->if_softc; 4098 4099 ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz); 4100 } 4101 4102 static void 4103 emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4104 { 4105 struct emx_softc *sc = ifp->if_softc; 4106 4107 ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz); 4108 } 4109 4110 static int 4111 emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 4112 { 4113 struct emx_softc *sc = ifp->if_softc; 4114 4115 return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz); 4116 } 4117 4118 static void 4119 emx_serialize_skipmain(struct emx_softc *sc) 4120 { 4121 lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1); 4122 } 4123 4124 static void 4125 emx_deserialize_skipmain(struct emx_softc *sc) 4126 { 4127 lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1); 4128 } 4129 4130 #ifdef INVARIANTS 4131 4132 static void 4133 emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 4134 boolean_t serialized) 4135 { 4136 struct emx_softc *sc = ifp->if_softc; 4137 4138 ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE, 4139 slz, serialized); 4140 } 4141 4142 #endif /* INVARIANTS */ 4143 4144 #ifdef IFPOLL_ENABLE 4145 4146 static void 4147 emx_npoll_status(struct ifnet *ifp) 4148 { 4149 struct emx_softc *sc = ifp->if_softc; 4150 uint32_t reg_icr; 4151 4152 ASSERT_SERIALIZED(&sc->main_serialize); 4153 4154 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 4155 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 4156 callout_stop(&sc->timer); 4157 sc->hw.mac.get_link_status = 1; 4158 emx_update_link_status(sc); 4159 callout_reset(&sc->timer, hz, emx_timer, sc); 4160 } 4161 } 4162 4163 static void 4164 emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 4165 { 4166 struct emx_txdata *tdata = arg; 4167 4168 ASSERT_SERIALIZED(&tdata->tx_serialize); 4169 4170 emx_tx_intr(tdata); 4171 emx_try_txgc(tdata, 1); 4172 } 4173 4174 static void 4175 emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 4176 { 4177 struct emx_rxdata *rdata = arg; 4178 4179 ASSERT_SERIALIZED(&rdata->rx_serialize); 4180 4181 emx_rxeof(rdata, cycle); 4182 } 4183 4184 static void 4185 emx_npoll(struct ifnet *ifp, struct ifpoll_info *info) 4186 { 4187 struct emx_softc *sc = ifp->if_softc; 4188 int i, txr_cnt; 4189 4190 ASSERT_IFNET_SERIALIZED_ALL(ifp); 4191 4192 if (info) { 4193 int cpu; 4194 4195 info->ifpi_status.status_func = emx_npoll_status; 4196 info->ifpi_status.serializer = &sc->main_serialize; 4197 4198 txr_cnt = emx_get_txring_inuse(sc, TRUE); 4199 for (i = 0; i < txr_cnt; ++i) { 4200 struct emx_txdata *tdata = &sc->tx_data[i]; 4201 4202 cpu = if_ringmap_cpumap(sc->tx_rmap, i); 4203 KKASSERT(cpu < netisr_ncpus); 4204 info->ifpi_tx[cpu].poll_func = emx_npoll_tx; 4205 info->ifpi_tx[cpu].arg = tdata; 4206 info->ifpi_tx[cpu].serializer = &tdata->tx_serialize; 4207 ifsq_set_cpuid(tdata->ifsq, cpu); 4208 } 4209 4210 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4211 struct emx_rxdata *rdata = &sc->rx_data[i]; 4212 4213 cpu = if_ringmap_cpumap(sc->rx_rmap, i); 4214 KKASSERT(cpu < netisr_ncpus); 4215 info->ifpi_rx[cpu].poll_func = emx_npoll_rx; 4216 info->ifpi_rx[cpu].arg = rdata; 4217 info->ifpi_rx[cpu].serializer = &rdata->rx_serialize; 4218 } 4219 } else { 4220 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4221 struct emx_txdata *tdata = &sc->tx_data[i]; 4222 4223 ifsq_set_cpuid(tdata->ifsq, 4224 rman_get_cpuid(sc->intr_res)); 4225 } 4226 } 4227 if (ifp->if_flags & IFF_RUNNING) 4228 emx_init(sc); 4229 } 4230 4231 #endif /* IFPOLL_ENABLE */ 4232 4233 static void 4234 emx_set_itr(struct emx_softc *sc, uint32_t itr) 4235 { 4236 E1000_WRITE_REG(&sc->hw, E1000_ITR, itr); 4237 if (sc->hw.mac.type == e1000_82574) { 4238 int i; 4239 4240 /* 4241 * When using MSIX interrupts we need to 4242 * throttle using the EITR register 4243 */ 4244 for (i = 0; i < 4; ++i) 4245 E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr); 4246 } 4247 } 4248 4249 /* 4250 * Disable the L0s, 82574L Errata #20 4251 */ 4252 static void 4253 emx_disable_aspm(struct emx_softc *sc) 4254 { 4255 uint16_t link_cap, link_ctrl, disable; 4256 uint8_t pcie_ptr, reg; 4257 device_t dev = sc->dev; 4258 4259 switch (sc->hw.mac.type) { 4260 case e1000_82571: 4261 case e1000_82572: 4262 case e1000_82573: 4263 /* 4264 * 82573 specification update 4265 * errata #8 disable L0s 4266 * errata #41 disable L1 4267 * 4268 * 82571/82572 specification update 4269 # errata #13 disable L1 4270 * errata #68 disable L0s 4271 */ 4272 disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1; 4273 break; 4274 4275 case e1000_82574: 4276 /* 4277 * 82574 specification update errata #20 4278 * 4279 * There is no need to disable L1 4280 */ 4281 disable = PCIEM_LNKCTL_ASPM_L0S; 4282 break; 4283 4284 default: 4285 return; 4286 } 4287 4288 pcie_ptr = pci_get_pciecap_ptr(dev); 4289 if (pcie_ptr == 0) 4290 return; 4291 4292 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2); 4293 if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0) 4294 return; 4295 4296 if (bootverbose) 4297 if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable); 4298 4299 reg = pcie_ptr + PCIER_LINKCTRL; 4300 link_ctrl = pci_read_config(dev, reg, 2); 4301 link_ctrl &= ~disable; 4302 pci_write_config(dev, reg, link_ctrl, 2); 4303 } 4304 4305 static int 4306 emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp) 4307 { 4308 int iphlen, hoff, thoff, ex = 0; 4309 struct mbuf *m; 4310 struct ip *ip; 4311 4312 m = *mp; 4313 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 4314 4315 iphlen = m->m_pkthdr.csum_iphlen; 4316 thoff = m->m_pkthdr.csum_thlen; 4317 hoff = m->m_pkthdr.csum_lhlen; 4318 4319 KASSERT(iphlen > 0, ("invalid ip hlen")); 4320 KASSERT(thoff > 0, ("invalid tcp hlen")); 4321 KASSERT(hoff > 0, ("invalid ether hlen")); 4322 4323 if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX) 4324 ex = 4; 4325 4326 if (m->m_len < hoff + iphlen + thoff + ex) { 4327 m = m_pullup(m, hoff + iphlen + thoff + ex); 4328 if (m == NULL) { 4329 *mp = NULL; 4330 return ENOBUFS; 4331 } 4332 *mp = m; 4333 } 4334 ip = mtodoff(m, struct ip *, hoff); 4335 ip->ip_len = 0; 4336 4337 return 0; 4338 } 4339 4340 static int 4341 emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp, 4342 uint32_t *txd_upper, uint32_t *txd_lower) 4343 { 4344 struct e1000_context_desc *TXD; 4345 int hoff, iphlen, thoff, hlen; 4346 int mss, pktlen, curr_txd; 4347 4348 #ifdef EMX_TSO_DEBUG 4349 tdata->tso_segments++; 4350 #endif 4351 4352 iphlen = mp->m_pkthdr.csum_iphlen; 4353 thoff = mp->m_pkthdr.csum_thlen; 4354 hoff = mp->m_pkthdr.csum_lhlen; 4355 mss = mp->m_pkthdr.tso_segsz; 4356 pktlen = mp->m_pkthdr.len; 4357 4358 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 4359 tdata->csum_flags == CSUM_TSO && 4360 tdata->csum_iphlen == iphlen && 4361 tdata->csum_lhlen == hoff && 4362 tdata->csum_thlen == thoff && 4363 tdata->csum_mss == mss && 4364 tdata->csum_pktlen == pktlen) { 4365 *txd_upper = tdata->csum_txd_upper; 4366 *txd_lower = tdata->csum_txd_lower; 4367 #ifdef EMX_TSO_DEBUG 4368 tdata->tso_ctx_reused++; 4369 #endif 4370 return 0; 4371 } 4372 hlen = hoff + iphlen + thoff; 4373 4374 /* 4375 * Setup a new TSO context. 4376 */ 4377 4378 curr_txd = tdata->next_avail_tx_desc; 4379 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 4380 4381 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 4382 E1000_TXD_DTYP_D | /* Data descr type */ 4383 E1000_TXD_CMD_TSE; /* Do TSE on this packet */ 4384 4385 /* IP and/or TCP header checksum calculation and insertion. */ 4386 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 4387 4388 /* 4389 * Start offset for header checksum calculation. 4390 * End offset for header checksum calculation. 4391 * Offset of place put the checksum. 4392 */ 4393 TXD->lower_setup.ip_fields.ipcss = hoff; 4394 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1); 4395 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum); 4396 4397 /* 4398 * Start offset for payload checksum calculation. 4399 * End offset for payload checksum calculation. 4400 * Offset of place to put the checksum. 4401 */ 4402 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen; 4403 TXD->upper_setup.tcp_fields.tucse = 0; 4404 TXD->upper_setup.tcp_fields.tucso = 4405 hoff + iphlen + offsetof(struct tcphdr, th_sum); 4406 4407 /* 4408 * Payload size per packet w/o any headers. 4409 * Length of all headers up to payload. 4410 */ 4411 TXD->tcp_seg_setup.fields.mss = htole16(mss); 4412 TXD->tcp_seg_setup.fields.hdr_len = hlen; 4413 TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS | 4414 E1000_TXD_CMD_DEXT | /* Extended descr */ 4415 E1000_TXD_CMD_TSE | /* TSE context */ 4416 E1000_TXD_CMD_IP | /* Do IP csum */ 4417 E1000_TXD_CMD_TCP | /* Do TCP checksum */ 4418 (pktlen - hlen)); /* Total len */ 4419 4420 /* Save the information for this TSO context */ 4421 tdata->csum_flags = CSUM_TSO; 4422 tdata->csum_lhlen = hoff; 4423 tdata->csum_iphlen = iphlen; 4424 tdata->csum_thlen = thoff; 4425 tdata->csum_mss = mss; 4426 tdata->csum_pktlen = pktlen; 4427 tdata->csum_txd_upper = *txd_upper; 4428 tdata->csum_txd_lower = *txd_lower; 4429 4430 if (++curr_txd == tdata->num_tx_desc) 4431 curr_txd = 0; 4432 4433 KKASSERT(tdata->num_tx_desc_avail > 0); 4434 tdata->num_tx_desc_avail--; 4435 4436 tdata->next_avail_tx_desc = curr_txd; 4437 return 1; 4438 } 4439 4440 static int 4441 emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling) 4442 { 4443 if (polling) 4444 return sc->tx_ring_cnt; 4445 else 4446 return 1; 4447 } 4448 4449 /* 4450 * Remove all descriptors from the TX ring. 4451 * 4452 * We want to clear all pending descriptors from the TX ring. Zeroing 4453 * happens when the HW reads the regs. We assign the ring itself as 4454 * the data of the next descriptor. We don't care about the data we 4455 * are about to reset the HW. 4456 */ 4457 static void 4458 emx_flush_tx_ring(struct emx_softc *sc) 4459 { 4460 struct e1000_hw *hw = &sc->hw; 4461 uint32_t tctl; 4462 int i; 4463 4464 tctl = E1000_READ_REG(hw, E1000_TCTL); 4465 E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN); 4466 4467 for (i = 0; i < sc->tx_ring_inuse; ++i) { 4468 struct emx_txdata *tdata = &sc->tx_data[i]; 4469 struct e1000_tx_desc *txd; 4470 4471 if (E1000_READ_REG(hw, E1000_TDLEN(i)) == 0) 4472 continue; 4473 4474 txd = &tdata->tx_desc_base[tdata->next_avail_tx_desc++]; 4475 if (tdata->next_avail_tx_desc == tdata->num_tx_desc) 4476 tdata->next_avail_tx_desc = 0; 4477 4478 /* Just use the ring as a dummy buffer addr */ 4479 txd->buffer_addr = tdata->tx_desc_paddr; 4480 txd->lower.data = htole32(E1000_TXD_CMD_IFCS | 512); 4481 txd->upper.data = 0; 4482 4483 E1000_WRITE_REG(hw, E1000_TDT(i), tdata->next_avail_tx_desc); 4484 usec_delay(250); 4485 } 4486 } 4487 4488 /* 4489 * Remove all descriptors from the RX rings. 4490 * 4491 * Mark all descriptors in the RX rings as consumed and disable the RX rings. 4492 */ 4493 static void 4494 emx_flush_rx_ring(struct emx_softc *sc) 4495 { 4496 struct e1000_hw *hw = &sc->hw; 4497 uint32_t rctl; 4498 int i; 4499 4500 rctl = E1000_READ_REG(hw, E1000_RCTL); 4501 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 4502 E1000_WRITE_FLUSH(hw); 4503 usec_delay(150); 4504 4505 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4506 uint32_t rxdctl; 4507 4508 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); 4509 /* Zero the lower 14 bits (prefetch and host thresholds) */ 4510 rxdctl &= 0xffffc000; 4511 /* 4512 * Update thresholds: prefetch threshold to 31, host threshold 4513 * to 1 and make sure the granularity is "descriptors" and not 4514 * "cache lines". 4515 */ 4516 rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); 4517 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); 4518 } 4519 4520 /* Momentarily enable the RX rings for the changes to take effect */ 4521 E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN); 4522 E1000_WRITE_FLUSH(hw); 4523 usec_delay(150); 4524 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 4525 } 4526 4527 /* 4528 * Remove all descriptors from the descriptor rings. 4529 * 4530 * In i219, the descriptor rings must be emptied before resetting the HW 4531 * or before changing the device state to D3 during runtime (runtime PM). 4532 * 4533 * Failure to do this will cause the HW to enter a unit hang state which 4534 * can only be released by PCI reset on the device. 4535 */ 4536 static void 4537 emx_flush_txrx_ring(struct emx_softc *sc) 4538 { 4539 struct e1000_hw *hw = &sc->hw; 4540 device_t dev = sc->dev; 4541 uint16_t hang_state; 4542 uint32_t fext_nvm11, tdlen; 4543 int i; 4544 4545 /* 4546 * First, disable MULR fix in FEXTNVM11. 4547 */ 4548 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); 4549 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; 4550 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11); 4551 4552 /* 4553 * Do nothing if we're not in faulty state, or if the queue is 4554 * empty. 4555 */ 4556 tdlen = 0; 4557 for (i = 0; i < sc->tx_ring_inuse; ++i) 4558 tdlen += E1000_READ_REG(hw, E1000_TDLEN(i)); 4559 hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2); 4560 if ((hang_state & EMX_FLUSH_DESC_REQUIRED) && tdlen) 4561 emx_flush_tx_ring(sc); 4562 4563 /* 4564 * Recheck, maybe the fault is caused by the RX ring. 4565 */ 4566 hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2); 4567 if (hang_state & EMX_FLUSH_DESC_REQUIRED) 4568 emx_flush_rx_ring(sc); 4569 } 4570