/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
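
/*
 * emx(4): driver for the Intel PRO/1000 gigabit ethernet family handled
 * by this file (82571/82572/82573/82574, 80003es2lan and the PCH LPT/SPT
 * I217/I218/I219 parts, per the device table below), supporting multiple
 * RX rings with RSS, multiple TX rings on capable MACs, MSI and
 * ifpoll(4) operation.
 */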

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
    if (sc->rss_debug >= lvl) \
        if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id) \
    { EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
    uint16_t	vid;
    uint16_t	did;
    const char	*desc;
} emx_devices[] = {
    EMX_DEVICE(82571EB_COPPER),
    EMX_DEVICE(82571EB_FIBER),
    EMX_DEVICE(82571EB_SERDES),
    EMX_DEVICE(82571EB_SERDES_DUAL),
    EMX_DEVICE(82571EB_SERDES_QUAD),
    EMX_DEVICE(82571EB_QUAD_COPPER),
    EMX_DEVICE(82571EB_QUAD_COPPER_BP),
    EMX_DEVICE(82571EB_QUAD_COPPER_LP),
    EMX_DEVICE(82571EB_QUAD_FIBER),
    EMX_DEVICE(82571PT_QUAD_COPPER),

    EMX_DEVICE(82572EI_COPPER),
    EMX_DEVICE(82572EI_FIBER),
    EMX_DEVICE(82572EI_SERDES),
    EMX_DEVICE(82572EI),

    EMX_DEVICE(82573E),
    EMX_DEVICE(82573E_IAMT),
    EMX_DEVICE(82573L),

    EMX_DEVICE(80003ES2LAN_COPPER_SPT),
    EMX_DEVICE(80003ES2LAN_SERDES_SPT),
    EMX_DEVICE(80003ES2LAN_COPPER_DPT),
    EMX_DEVICE(80003ES2LAN_SERDES_DPT),

    EMX_DEVICE(82574L),
    EMX_DEVICE(82574LA),

    EMX_DEVICE(PCH_LPT_I217_LM),
    EMX_DEVICE(PCH_LPT_I217_V),
    EMX_DEVICE(PCH_LPTLP_I218_LM),
    EMX_DEVICE(PCH_LPTLP_I218_V),
    EMX_DEVICE(PCH_I218_LM2),
    EMX_DEVICE(PCH_I218_V2),
    EMX_DEVICE(PCH_I218_LM3),
    EMX_DEVICE(PCH_I218_V3),
    EMX_DEVICE(PCH_SPT_I219_LM),
    EMX_DEVICE(PCH_SPT_I219_V),
    EMX_DEVICE(PCH_SPT_I219_LM2),
    EMX_DEVICE(PCH_SPT_I219_V2),
    EMX_DEVICE(PCH_SPT_I219_LM3),
    EMX_DEVICE(PCH_SPT_I219_LM4),
    EMX_DEVICE(PCH_SPT_I219_V4),
    EMX_DEVICE(PCH_SPT_I219_LM5),
    EMX_DEVICE(PCH_SPT_I219_V5),

    /* required last entry */
    EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);
static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *, boolean_t);
static void	emx_txgc_timer(void *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, emx_probe),
    DEVMETHOD(device_attach, emx_attach),
    DEVMETHOD(device_detach, emx_detach),
    DEVMETHOD(device_shutdown, emx_shutdown),
    DEVMETHOD(device_suspend, emx_suspend),
    DEVMETHOD(device_resume, emx_resume),
    DEVMETHOD_END
};

static driver_t emx_driver = {
    "emx",
    emx_methods,
    sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

static char	emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
    rxd->rxd_bufaddr = htole64(rxbuf->paddr);
    /* DD bit must be cleared */
    rxd->rxd_staterr = 0;
}

static __inline void
emx_free_txbuf(struct emx_txdata *tdata, struct emx_txbuf *tx_buffer)
{
    KKASSERT(tx_buffer->m_head != NULL);
    KKASSERT(tdata->tx_nmbuf > 0);
    tdata->tx_nmbuf--;

    bus_dmamap_unload(tdata->txtag, tx_buffer->map);
    m_freem(tx_buffer->m_head);
    tx_buffer->m_head = NULL;
}

static __inline void
emx_tx_intr(struct emx_txdata *tdata)
{
    emx_txeof(tdata);
    if (!ifsq_is_empty(tdata->ifsq))
        ifsq_devstart(tdata->ifsq);
}
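
/*
 * Lazy TX garbage collection: tx_running is topped up whenever the TX
 * path runs and decays over time.  Once it drains to zero while
 * completed mbufs are still loaded and not all descriptors have been
 * reclaimed, the ring is forcibly cleaned so buffers are not held
 * indefinitely after the interface goes quiet.
 */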
static __inline void
emx_try_txgc(struct emx_txdata *tdata, int16_t dec)
{
    if (tdata->tx_running > 0) {
        tdata->tx_running -= dec;
        if (tdata->tx_running <= 0 && tdata->tx_nmbuf &&
            tdata->num_tx_desc_avail < tdata->num_tx_desc &&
            tdata->num_tx_desc_avail + tdata->tx_intr_nsegs >
            tdata->num_tx_desc)
            emx_tx_collect(tdata, TRUE);
    }
}

static void
emx_txgc_timer(void *xtdata)
{
    struct emx_txdata *tdata = xtdata;
    struct ifnet *ifp = &tdata->sc->arpcom.ac_if;

    if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
        (IFF_RUNNING | IFF_UP))
        return;

    if (!lwkt_serialize_try(&tdata->tx_serialize))
        goto done;

    if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
        (IFF_RUNNING | IFF_UP)) {
        lwkt_serialize_exit(&tdata->tx_serialize);
        return;
    }
    emx_try_txgc(tdata, EMX_TX_RUNNING_DEC);

    lwkt_serialize_exit(&tdata->tx_serialize);
done:
    callout_reset(&tdata->tx_gc_timer, 1, emx_txgc_timer, tdata);
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
    /* Ignore Checksum bit is set */
    if (staterr & E1000_RXD_STAT_IXSM)
        return;

    if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
        E1000_RXD_STAT_IPCS)
        mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

    if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
        E1000_RXD_STAT_TCPCS) {
        mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
            CSUM_PSEUDO_HDR |
            CSUM_FRAG_NOT_CHECKED;
        mp->m_pkthdr.csum_data = htons(0xffff);
    }
}
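
/*
 * Map the RSS type reported in the RX descriptor's MRQ field to a
 * pktinfo (netisr + L3 protocol) and record the Toeplitz hash on the
 * mbuf; return NULL when the packet cannot be classified.
 */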
static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
    switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
    case EMX_RXDMRQ_IPV4_TCP:
        pi->pi_netisr = NETISR_IP;
        pi->pi_flags = 0;
        pi->pi_l3proto = IPPROTO_TCP;
        break;

    case EMX_RXDMRQ_IPV6_TCP:
        pi->pi_netisr = NETISR_IPV6;
        pi->pi_flags = 0;
        pi->pi_l3proto = IPPROTO_TCP;
        break;

    case EMX_RXDMRQ_IPV4:
        if (staterr & E1000_RXD_STAT_IXSM)
            return NULL;

        if ((staterr &
             (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
            E1000_RXD_STAT_TCPCS) {
            pi->pi_netisr = NETISR_IP;
            pi->pi_flags = 0;
            pi->pi_l3proto = IPPROTO_UDP;
            break;
        }
        /* FALL THROUGH */
    default:
        return NULL;
    }

    m_sethash(m, toeplitz_hash(hash));
    return pi;
}

static int
emx_probe(device_t dev)
{
    const struct emx_device *d;
    uint16_t vid, did;

    vid = pci_get_vendor(dev);
    did = pci_get_device(dev);

    for (d = emx_devices; d->desc != NULL; ++d) {
        if (vid == d->vid && did == d->did) {
            device_set_desc(dev, d->desc);
            device_set_async_attach(dev, TRUE);
            return 0;
        }
    }
    return ENXIO;
}

static int
emx_attach(device_t dev)
{
    struct emx_softc *sc = device_get_softc(dev);
    int error = 0, i, throttle, msi_enable;
    int tx_ring_max, ring_cnt;
    u_int intr_flags;
    uint16_t eeprom_data, device_id, apme_mask;
    driver_intr_t *intr_func;
    char flowctrl[IFM_ETH_FC_STRLEN];

    /*
     * Setup RX rings
     */
    for (i = 0; i < EMX_NRX_RING; ++i) {
        sc->rx_data[i].sc = sc;
        sc->rx_data[i].idx = i;
    }

    /*
     * Setup TX rings
     */
    for (i = 0; i < EMX_NTX_RING; ++i) {
        sc->tx_data[i].sc = sc;
        sc->tx_data[i].idx = i;
        callout_init_mp(&sc->tx_data[i].tx_gc_timer);
    }

    /*
     * Initialize serializers
     */
    lwkt_serialize_init(&sc->main_serialize);
    for (i = 0; i < EMX_NTX_RING; ++i)
        lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
    for (i = 0; i < EMX_NRX_RING; ++i)
        lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

    /*
     * Initialize serializer array
     */
    i = 0;

    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->main_serialize;

    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

    KKASSERT(i == EMX_NSERIALIZE);

    ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
        emx_media_change, emx_media_status);
    callout_init_mp(&sc->timer);

    sc->dev = sc->osdep.dev = dev;

    /*
     * Determine hardware and mac type
     */
    sc->hw.vendor_id = pci_get_vendor(dev);
    sc->hw.device_id = pci_get_device(dev);
    sc->hw.revision_id = pci_get_revid(dev);
    sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
    sc->hw.subsystem_device_id = pci_get_subdevice(dev);

    if (e1000_set_mac_type(&sc->hw))
        return ENXIO;

    /* Enable bus mastering */
    pci_enable_busmaster(dev);

    /*
     * Allocate IO memory
     */
    sc->memory_rid = EMX_BAR_MEM;
    sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->memory_rid, RF_ACTIVE);
    if (sc->memory == NULL) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        error = ENXIO;
        goto fail;
    }
    sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
    sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

    /* XXX This is quite goofy, it is not actually used */
    sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

    /*
     * Don't enable MSI-X on 82574, see:
     * 82574 specification update errata #15
     *
     * Don't enable MSI on 82571/82572, see:
     * 82571/82572 specification update errata #63
     */
    msi_enable = emx_msi_enable;
    if (msi_enable &&
        (sc->hw.mac.type == e1000_82571 ||
         sc->hw.mac.type == e1000_82572))
        msi_enable = 0;
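
    /*
     * If the legacy interrupt allocation below fails, we jump back
     * here with MSI enabled to retry before giving up on the attach.
     */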
again:
    /*
     * Allocate interrupt
     */
    sc->intr_type = pci_alloc_1intr(dev, msi_enable,
        &sc->intr_rid, &intr_flags);

    if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
        int unshared;

        unshared = device_getenv_int(dev, "irq.unshared", 0);
        if (!unshared) {
            sc->flags |= EMX_FLAG_SHARED_INTR;
            if (bootverbose)
                device_printf(dev, "IRQ shared\n");
        } else {
            intr_flags &= ~RF_SHAREABLE;
            if (bootverbose)
                device_printf(dev, "IRQ unshared\n");
        }
    }

    sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
        intr_flags);
    if (sc->intr_res == NULL) {
        device_printf(dev, "Unable to allocate bus resource: %s\n",
            sc->intr_type == PCI_INTR_TYPE_MSI ? "MSI" : "legacy intr");
        if (!msi_enable) {
            /* Retry with MSI. */
            msi_enable = 1;
            sc->flags &= ~EMX_FLAG_SHARED_INTR;
            goto again;
        }
        error = ENXIO;
        goto fail;
    }

    /* Save PCI command register for Shared Code */
    sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
    sc->hw.back = &sc->osdep;

    /*
     * For I217/I218, we need to map the flash memory and this
     * must happen after the MAC is identified.
     */
    if (sc->hw.mac.type == e1000_pch_lpt) {
        sc->flash_rid = EMX_BAR_FLASH;

        sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &sc->flash_rid, RF_ACTIVE);
        if (sc->flash == NULL) {
            device_printf(dev, "Mapping of Flash failed\n");
            error = ENXIO;
            goto fail;
        }
        sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
        sc->osdep.flash_bus_space_handle =
            rman_get_bushandle(sc->flash);

        /*
         * This is used in the shared code
         * XXX this goof is actually not used.
         */
        sc->hw.flash_address = (uint8_t *)sc->flash;
    }

    /* Do Shared Code initialization */
    if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
        device_printf(dev, "Setup of Shared code failed\n");
        error = ENXIO;
        goto fail;
    }
    e1000_get_bus_info(&sc->hw);

    sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
    sc->hw.phy.autoneg_wait_to_complete = FALSE;
    sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
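
    /*
     * The ITR register expresses the minimum inter-interrupt gap in
     * 256ns units, so an interrupt rate converts below as
     * register = 10^9 / 256 / rate.  As an illustrative example
     * (assumed numbers): a ceiling of 6000 interrupts/s programs the
     * register with 1000000000 / 256 / 6000 = 651, and converting
     * back yields the exact effective ceiling of 6000 that is stored
     * in int_throttle_ceil.
     */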
    /*
     * Interrupt throttle rate
     */
    throttle = device_getenv_int(dev, "int_throttle_ceil",
        emx_int_throttle_ceil);
    if (throttle == 0) {
        sc->int_throttle_ceil = 0;
    } else {
        if (throttle < 0)
            throttle = EMX_DEFAULT_ITR;

        /* Recalculate the tunable value to get the exact frequency. */
        throttle = 1000000000 / 256 / throttle;

        /* The upper 16 bits of the ITR are reserved and should be zero */
        if (throttle & 0xffff0000)
            throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

        sc->int_throttle_ceil = 1000000000 / 256 / throttle;
    }

    e1000_init_script_state_82541(&sc->hw, TRUE);
    e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

    /* Copper options */
    if (sc->hw.phy.media_type == e1000_media_type_copper) {
        sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
        sc->hw.phy.disable_polarity_correction = FALSE;
        sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
    }

    /* Set the frame limits assuming standard ethernet sized frames. */
    sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /* This controls when hardware reports transmit completion status. */
    sc->hw.mac.report_tx_early = 1;

    /*
     * Calculate # of RX/TX rings
     */
    ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
    sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, EMX_NRX_RING);

    tx_ring_max = 1;
    if (sc->hw.mac.type == e1000_82571 ||
        sc->hw.mac.type == e1000_82572 ||
        sc->hw.mac.type == e1000_80003es2lan ||
        sc->hw.mac.type == e1000_pch_lpt ||
        sc->hw.mac.type == e1000_pch_spt ||
        sc->hw.mac.type == e1000_82574)
        tx_ring_max = EMX_NTX_RING;
    ring_cnt = device_getenv_int(dev, "txr", emx_txr);
    sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, tx_ring_max);

    if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);
    sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
    sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);

    /* Allocate RX/TX rings' busdma(9) stuffs */
    error = emx_dma_alloc(sc);
    if (error)
        goto fail;

    /* Allocate multicast array memory. */
    sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
        M_DEVBUF, M_WAITOK);

    /* Indicate SOL/IDER usage */
    if (e1000_check_reset_block(&sc->hw)) {
        device_printf(dev,
            "PHY reset is blocked due to SOL/IDER session.\n");
    }

    /* Disable EEE on I217/I218 */
    sc->hw.dev_spec.ich8lan.eee_disable = 1;

    /*
     * Start from a known state; this is important for reading the
     * NVM and MAC address.
     */
    e1000_reset_hw(&sc->hw);

    /* Make sure we have a good EEPROM before we read from it */
    if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
        /*
         * Some PCI-E parts fail the first check due to
         * the link being in sleep state; call it again.
         * If it fails a second time, it's a real issue.
         */
        if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
            device_printf(dev,
                "The EEPROM Checksum Is Not Valid\n");
            error = EIO;
            goto fail;
        }
    }

    /* Copy the permanent MAC address out of the EEPROM */
    if (e1000_read_mac_addr(&sc->hw) < 0) {
        device_printf(dev, "EEPROM read error while reading MAC"
            " address\n");
        error = EIO;
        goto fail;
    }
    if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
        device_printf(dev, "Invalid MAC address\n");
        error = EIO;
        goto fail;
    }

    /* Disable ULP support */
    e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

    /* Determine if we have to control management hardware */
    if (e1000_enable_mng_pass_thru(&sc->hw))
        sc->flags |= EMX_FLAG_HAS_MGMT;

    /*
     * Setup Wake-on-Lan
     */
    apme_mask = EMX_EEPROM_APME;
    eeprom_data = 0;
    switch (sc->hw.mac.type) {
    case e1000_82573:
        sc->flags |= EMX_FLAG_HAS_AMT;
        /* FALL THROUGH */

    case e1000_82571:
    case e1000_82572:
    case e1000_80003es2lan:
        if (sc->hw.bus.func == 1) {
            e1000_read_nvm(&sc->hw,
                NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
        } else {
            e1000_read_nvm(&sc->hw,
                NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
        }
        break;

    default:
        e1000_read_nvm(&sc->hw,
            NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
        break;
    }
    if (eeprom_data & apme_mask)
        sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

    /*
     * We have the eeprom settings; now apply the special cases
     * where the eeprom may be wrong or the board won't support
     * wake on lan on a particular port.
     */
    device_id = pci_get_device(dev);
    switch (device_id) {
    case E1000_DEV_ID_82571EB_FIBER:
        /*
         * Wake events only supported on port A for dual fiber
         * regardless of eeprom setting
         */
        if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
            E1000_STATUS_FUNC_1)
            sc->wol = 0;
        break;

    case E1000_DEV_ID_82571EB_QUAD_COPPER:
    case E1000_DEV_ID_82571EB_QUAD_FIBER:
    case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
        /* If quad port adapter, disable WoL on all but port A */
        if (emx_global_quad_port_a != 0)
            sc->wol = 0;
        /* Reset for multiple quad port adapters */
        if (++emx_global_quad_port_a == 4)
            emx_global_quad_port_a = 0;
        break;
    }

    /* XXX disable wol */
    sc->wol = 0;

    /* Initialize the # of TX rings to use. */
    sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

    /* Setup flow control. */
    device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
        emx_flowctrl);
    sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

    /* Setup OS specific network interface */
    emx_setup_ifp(sc);

    /* Add sysctl tree; this must be done after emx_setup_ifp() */
    emx_add_sysctl(sc);

    /* Reset the hardware */
    error = emx_reset(sc);
    if (error) {
        /*
         * Some 82573 parts fail the first reset; call it again.
         * If it fails a second time, it's a real issue.
         */
        error = emx_reset(sc);
        if (error) {
            device_printf(dev, "Unable to reset the hardware\n");
            ether_ifdetach(&sc->arpcom.ac_if);
            goto fail;
        }
    }

    /* Initialize statistics */
    emx_update_stats(sc);

    sc->hw.mac.get_link_status = 1;
    emx_update_link_status(sc);

    /* Non-AMT based hardware can now take control from firmware */
    if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
        EMX_FLAG_HAS_MGMT)
        emx_get_hw_control(sc);

    /*
     * Missing Interrupt Following ICR read:
     *
     * 82571/82572 specification update errata #76
     * 82573 specification update errata #31
     * 82574 specification update errata #12
     */
    intr_func = emx_intr;
    if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
        (sc->hw.mac.type == e1000_82571 ||
         sc->hw.mac.type == e1000_82572 ||
         sc->hw.mac.type == e1000_82573 ||
         sc->hw.mac.type == e1000_82574))
        intr_func = emx_intr_mask;

    error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
        &sc->intr_tag, &sc->main_serialize);
    if (error) {
        device_printf(dev, "Failed to register interrupt handler");
        ether_ifdetach(&sc->arpcom.ac_if);
        goto fail;
    }
    return (0);
fail:
    emx_detach(dev);
    return (error);
}

static int
emx_detach(device_t dev)
{
    struct emx_softc *sc = device_get_softc(dev);

    if (device_is_attached(dev)) {
        struct ifnet *ifp = &sc->arpcom.ac_if;

        ifnet_serialize_all(ifp);

        emx_stop(sc);

        e1000_phy_hw_reset(&sc->hw);

        emx_rel_mgmt(sc);
        emx_rel_hw_control(sc);

        if (sc->wol) {
            E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
            E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
            emx_enable_wol(dev);
        }

        bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

        ifnet_deserialize_all(ifp);

        ether_ifdetach(ifp);
    } else if (sc->memory != NULL) {
        emx_rel_hw_control(sc);
    }

    ifmedia_removeall(&sc->media);
    bus_generic_detach(dev);

    if (sc->intr_res != NULL) {
        bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
            sc->intr_res);
    }

    if (sc->intr_type == PCI_INTR_TYPE_MSI)
        pci_release_msi(dev);

    if (sc->memory != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
            sc->memory);
    }

    if (sc->flash != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
            sc->flash);
    }

    emx_dma_free(sc);

    if (sc->mta != NULL)
        kfree(sc->mta, M_DEVBUF);

    if (sc->rx_rmap != NULL)
        if_ringmap_free(sc->rx_rmap);
    if (sc->tx_rmap != NULL)
        if_ringmap_free(sc->tx_rmap);

    return (0);
}

static int
emx_shutdown(device_t dev)
{
    return emx_suspend(dev);
}
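
/*
 * Suspend path: stop the chip, release management/hardware control and,
 * if WOL filters are configured, arm the wakeup registers before handing
 * control to the bus layer.
 */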
static int
emx_suspend(device_t dev)
{
    struct emx_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;

    ifnet_serialize_all(ifp);

    emx_stop(sc);

    emx_rel_mgmt(sc);
    emx_rel_hw_control(sc);

    if (sc->wol) {
        E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
        E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
        emx_enable_wol(dev);
    }

    ifnet_deserialize_all(ifp);

    return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
    struct emx_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;
    int i;

    ifnet_serialize_all(ifp);

    emx_init(sc);
    emx_get_mgmt(sc);
    for (i = 0; i < sc->tx_ring_inuse; ++i)
        ifsq_devstart_sched(sc->tx_data[i].ifsq);

    ifnet_deserialize_all(ifp);

    return bus_generic_resume(dev);
}
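
/*
 * Transmit path.  TDT (the TX tail register) is not bumped for every
 * frame; updates are batched until at least tx_wreg_nsegs descriptors
 * have been set up, which saves register writes under load.
 */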
static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
    struct emx_softc *sc = ifp->if_softc;
    struct emx_txdata *tdata = ifsq_get_priv(ifsq);
    struct mbuf *m_head;
    int idx = -1, nsegs = 0;

    KKASSERT(tdata->ifsq == ifsq);
    ASSERT_SERIALIZED(&tdata->tx_serialize);

    if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
        return;

    if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
        ifsq_purge(ifsq);
        return;
    }

    while (!ifsq_is_empty(ifsq)) {
        /* Do we have at least the minimal number of free descriptors? */
        if (EMX_IS_OACTIVE(tdata)) {
            emx_tx_collect(tdata, FALSE);
            if (EMX_IS_OACTIVE(tdata)) {
                ifsq_set_oactive(ifsq);
                break;
            }
        }

        logif(pkt_txqueue);
        m_head = ifsq_dequeue(ifsq);
        if (m_head == NULL)
            break;

        if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
            IFNET_STAT_INC(ifp, oerrors, 1);
            emx_tx_collect(tdata, FALSE);
            continue;
        }

        /*
         * TX interrupts are aggressively aggregated, so increasing
         * opackets at TX interrupt time would make the opackets
         * statistics vastly inaccurate; we do the opackets increment
         * now.
         */
        IFNET_STAT_INC(ifp, opackets, 1);

        if (nsegs >= tdata->tx_wreg_nsegs) {
            E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
            nsegs = 0;
            idx = -1;
        }

        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, m_head);

        /* Set timeout in case hardware has problems transmitting. */
        tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
    }
    if (idx >= 0)
        E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
    tdata->tx_running = EMX_TX_RUNNING;
}

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
    struct emx_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *)data;
    uint16_t eeprom_data = 0;
    int max_frame_size, mask, reinit;
    int error = 0;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    switch (command) {
    case SIOCSIFMTU:
        switch (sc->hw.mac.type) {
        case e1000_82573:
            /*
             * 82573 only supports jumbo frames
             * if ASPM is disabled.
             */
            e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
                &eeprom_data);
            if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
                max_frame_size = ETHER_MAX_LEN;
                break;
            }
            /* FALL THROUGH */

        /* Limit Jumbo Frame size */
        case e1000_82571:
        case e1000_82572:
        case e1000_82574:
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_80003es2lan:
            max_frame_size = 9234;
            break;

        default:
            max_frame_size = MAX_JUMBO_FRAME_SIZE;
            break;
        }
        if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
            ETHER_CRC_LEN) {
            error = EINVAL;
            break;
        }

        ifp->if_mtu = ifr->ifr_mtu;
        sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
            ETHER_CRC_LEN;

        if (ifp->if_flags & IFF_RUNNING)
            emx_init(sc);
        break;

    case SIOCSIFFLAGS:
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_flags & IFF_RUNNING)) {
                if ((ifp->if_flags ^ sc->if_flags) &
                    (IFF_PROMISC | IFF_ALLMULTI)) {
                    emx_disable_promisc(sc);
                    emx_set_promisc(sc);
                }
            } else {
                emx_init(sc);
            }
        } else if (ifp->if_flags & IFF_RUNNING) {
            emx_stop(sc);
        }
        sc->if_flags = ifp->if_flags;
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if (ifp->if_flags & IFF_RUNNING) {
            emx_disable_intr(sc);
            emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
            if (!(ifp->if_flags & IFF_NPOLLING))
#endif
                emx_enable_intr(sc);
        }
        break;

    case SIOCSIFMEDIA:
        /* Check SOL/IDER usage */
        if (e1000_check_reset_block(&sc->hw)) {
            device_printf(sc->dev, "Media change is"
                " blocked due to SOL/IDER session.\n");
            break;
        }
        /* FALL THROUGH */

    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
        break;

    case SIOCSIFCAP:
        reinit = 0;
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if (mask & IFCAP_RXCSUM) {
            ifp->if_capenable ^= IFCAP_RXCSUM;
            reinit = 1;
        }
        if (mask & IFCAP_VLAN_HWTAGGING) {
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
            reinit = 1;
        }
        if (mask & IFCAP_TXCSUM) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= EMX_CSUM_FEATURES;
            else
                ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
        }
        if (mask & IFCAP_TSO) {
            ifp->if_capenable ^= IFCAP_TSO;
            if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
            else
                ifp->if_hwassist &= ~CSUM_TSO;
        }
        if (mask & IFCAP_RSS)
            ifp->if_capenable ^= IFCAP_RSS;
        if (reinit && (ifp->if_flags & IFF_RUNNING))
            emx_init(sc);
        break;

    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }
    return (error);
}

static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
    struct emx_txdata *tdata = ifsq_get_priv(ifsq);
    struct ifnet *ifp = ifsq_get_ifp(ifsq);
    struct emx_softc *sc = ifp->if_softc;
    int i;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    /*
     * The timer is set to 5 every time start queues a packet.
     * Then txeof keeps resetting it as long as it cleans at
     * least one descriptor.
     * Finally, anytime all descriptors are clean the timer is
     * set to 0.
     */

    if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
        E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
        /*
         * If we reach here, all TX jobs are completed and
         * the TX engine should have been idled for some time.
         * We don't need to call ifsq_devstart_sched() here.
         */
        ifsq_clr_oactive(ifsq);
        tdata->tx_watchdog.wd_timer = 0;
        return;
    }

    /*
     * If we are in this routine because of pause frames, then
     * don't reset the hardware.
     */
    if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
        tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
        return;
    }

    if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

    IFNET_STAT_INC(ifp, oerrors, 1);

    emx_init(sc);
    for (i = 0; i < sc->tx_ring_inuse; ++i)
        ifsq_devstart_sched(sc->tx_data[i].ifsq);
}
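
/*
 * (Re)initialize the chip: program the station address, reset the
 * hardware, configure VLAN/offload state, bring up the TX/RX rings
 * and enable either interrupts or polling.
 */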
static void
emx_init(void *xsc)
{
    struct emx_softc *sc = xsc;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    device_t dev = sc->dev;
    boolean_t polling;
    int i;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    emx_stop(sc);

    /* Get the latest mac address; user can use a LAA */
    bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

    /* Put the address into the Receive Address Array */
    e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

    /*
     * With the 82571 adapter, RAR[0] may be overwritten when the
     * other port is reset; we make a duplicate in the last RAR
     * entry for that eventuality, which assures the interface
     * continues to function.
     */
    if (sc->hw.mac.type == e1000_82571) {
        e1000_set_laa_state_82571(&sc->hw, TRUE);
        e1000_rar_set(&sc->hw, sc->hw.mac.addr,
            E1000_RAR_ENTRIES - 1);
    }

    /* Initialize the hardware */
    if (emx_reset(sc)) {
        device_printf(dev, "Unable to reset the hardware\n");
        /* XXX emx_stop()? */
        return;
    }
    emx_update_link_status(sc);

    /* Setup VLAN support, basic and offload if available */
    E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

    if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
        uint32_t ctrl;

        ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
        ctrl |= E1000_CTRL_VME;
        E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
    }

    /* Configure for OS presence */
    emx_get_mgmt(sc);

    polling = FALSE;
#ifdef IFPOLL_ENABLE
    if (ifp->if_flags & IFF_NPOLLING)
        polling = TRUE;
#endif
    sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
    ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

    /* Prepare transmit descriptors and buffers */
    for (i = 0; i < sc->tx_ring_inuse; ++i)
        emx_init_tx_ring(&sc->tx_data[i]);
    emx_init_tx_unit(sc);

    /* Setup Multicast table */
    emx_set_multi(sc);

    /* Prepare receive descriptors and buffers */
    for (i = 0; i < sc->rx_ring_cnt; ++i) {
        if (emx_init_rx_ring(&sc->rx_data[i])) {
            device_printf(dev,
                "Could not setup receive structures\n");
            emx_stop(sc);
            return;
        }
    }
    emx_init_rx_unit(sc);

    /* Don't lose promiscuous settings */
    emx_set_promisc(sc);

    /* Reset hardware counters */
    e1000_clear_hw_cntrs_base_generic(&sc->hw);

    /* MSI/X configuration for 82574 */
    if (sc->hw.mac.type == e1000_82574) {
        int tmp;

        tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
        tmp |= E1000_CTRL_EXT_PBA_CLR;
        E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
        /*
         * XXX MSIX
         * Set the IVAR - interrupt vector routing.
         * Each nibble represents a vector, high bit
         * is enable, other 3 bits are the MSIX table
         * entry, we map RXQ0 to 0, TXQ0 to 1, and
         * Link (other) to 2, hence the magic number.
         */
        E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
    }

    /*
     * Only enable interrupts if we are not polling; make sure
     * they are off otherwise.
     */
    if (polling)
        emx_disable_intr(sc);
    else
        emx_enable_intr(sc);

    /* AMT based hardware can now take control from firmware */
    if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
        (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
        emx_get_hw_control(sc);

    ifp->if_flags |= IFF_RUNNING;
    for (i = 0; i < sc->tx_ring_inuse; ++i) {
        struct emx_txdata *tdata = &sc->tx_data[i];

        ifsq_clr_oactive(tdata->ifsq);
        ifsq_watchdog_start(&tdata->tx_watchdog);
        if (!polling) {
            callout_reset_bycpu(&tdata->tx_gc_timer, 1,
                emx_txgc_timer, tdata, ifsq_get_cpuid(tdata->ifsq));
        }
    }
    callout_reset(&sc->timer, hz, emx_timer, sc);
}
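
/*
 * Legacy/MSI interrupt handlers.  emx_intr() is installed for
 * exclusive interrupts and checks ICR.INT_ASSERTED; emx_intr_mask()
 * is the variant installed for shared interrupts on MACs with the
 * "missing interrupt following ICR read" errata (see emx_attach()),
 * masking IMS around the common body.
 */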
static void
emx_intr(void *xsc)
{
    emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    uint32_t reg_icr;

    logif(intr_beg);
    ASSERT_SERIALIZED(&sc->main_serialize);

    reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

    if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
        logif(intr_end);
        return;
    }

    /*
     * XXX: some laptops trigger several spurious interrupts
     * on emx(4) when in the resume cycle.  The ICR register
     * reports all-ones value in this case.  Processing such
     * interrupts would lead to a freeze.  I don't know why.
     */
    if (reg_icr == 0xffffffff) {
        logif(intr_end);
        return;
    }

    if (ifp->if_flags & IFF_RUNNING) {
        if (reg_icr &
            (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
            int i;

            for (i = 0; i < sc->rx_ring_cnt; ++i) {
                lwkt_serialize_enter(
                    &sc->rx_data[i].rx_serialize);
                emx_rxeof(&sc->rx_data[i], -1);
                lwkt_serialize_exit(
                    &sc->rx_data[i].rx_serialize);
            }
        }
        if (reg_icr & E1000_ICR_TXDW) {
            struct emx_txdata *tdata = &sc->tx_data[0];

            lwkt_serialize_enter(&tdata->tx_serialize);
            emx_tx_intr(tdata);
            lwkt_serialize_exit(&tdata->tx_serialize);
        }
    }

    /* Link status change */
    if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
        emx_serialize_skipmain(sc);

        callout_stop(&sc->timer);
        sc->hw.mac.get_link_status = 1;
        emx_update_link_status(sc);

        /* Deal with TX cruft when link lost */
        emx_tx_purge(sc);

        callout_reset(&sc->timer, hz, emx_timer, sc);

        emx_deserialize_skipmain(sc);
    }

    if (reg_icr & E1000_ICR_RXO)
        sc->rx_overruns++;

    logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
    struct emx_softc *sc = xsc;

    E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
    /*
     * NOTE:
     * ICR.INT_ASSERTED bit will never be set if IMS is 0,
     * so don't check it.
     */
    emx_intr_body(sc, FALSE);
    E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct emx_softc *sc = ifp->if_softc;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    emx_update_link_status(sc);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_active) {
        if (sc->hw.mac.autoneg)
            ifmr->ifm_active |= IFM_NONE;
        else
            ifmr->ifm_active |= sc->media.ifm_media;
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;
    if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
        ifmr->ifm_active |= sc->ifm_flowctrl;

    if (sc->hw.phy.media_type == e1000_media_type_fiber ||
        sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
        ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
    } else {
        switch (sc->link_speed) {
        case 10:
            ifmr->ifm_active |= IFM_10_T;
            break;
        case 100:
            ifmr->ifm_active |= IFM_100_TX;
            break;

        case 1000:
            ifmr->ifm_active |= IFM_1000_T;
            break;
        }
        if (sc->link_duplex == FULL_DUPLEX)
            ifmr->ifm_active |= IFM_FDX;
        else
            ifmr->ifm_active |= IFM_HDX;
    }
    if (ifmr->ifm_active & IFM_FDX)
        ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}
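
/*
 * Apply a media change request: forced 10/100 speeds disable
 * autonegotiation, and half-duplex is refused in combination with
 * flow control since PAUSE requires full duplex.
 */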
static int
emx_media_change(struct ifnet *ifp)
{
    struct emx_softc *sc = ifp->if_softc;
    struct ifmedia *ifm = &sc->media;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
        sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
        break;

    case IFM_1000_SX:
    case IFM_1000_T:
        sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
        sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
        break;

    case IFM_100_TX:
        if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
            sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
        } else {
            if (IFM_OPTIONS(ifm->ifm_media) &
                (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
                if (bootverbose) {
                    if_printf(ifp, "Flow control is not "
                        "allowed for half-duplex\n");
                }
                return EINVAL;
            }
            sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
        }
        sc->hw.mac.autoneg = FALSE;
        sc->hw.phy.autoneg_advertised = 0;
        break;

    case IFM_10_T:
        if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
            sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
        } else {
            if (IFM_OPTIONS(ifm->ifm_media) &
                (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
                if (bootverbose) {
                    if_printf(ifp, "Flow control is not "
                        "allowed for half-duplex\n");
                }
                return EINVAL;
            }
            sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
        }
        sc->hw.mac.autoneg = FALSE;
        sc->hw.phy.autoneg_advertised = 0;
        break;

    default:
        if (bootverbose) {
            if_printf(ifp, "Unsupported media type %d\n",
                IFM_SUBTYPE(ifm->ifm_media));
        }
        return EINVAL;
    }
    sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

    if (ifp->if_flags & IFF_RUNNING)
        emx_init(sc);

    return (0);
}
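
/*
 * Encapsulate one frame: load the mbuf chain into DMA segments
 * (defragmenting if necessary), optionally emit a TSO or checksum
 * context descriptor first, and defer the TDT update to the caller
 * through *idx.  Report Status (RS) is requested only every
 * tx_intr_nsegs descriptors to keep TX interrupts coalesced.
 */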
static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
    bus_dma_segment_t segs[EMX_MAX_SCATTER];
    bus_dmamap_t map;
    struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
    struct e1000_tx_desc *ctxd = NULL;
    struct mbuf *m_head = *m_headp;
    uint32_t txd_upper, txd_lower, cmd = 0;
    int maxsegs, nsegs, i, j, first, last = 0, error;

    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        error = emx_tso_pullup(tdata, m_headp);
        if (error)
            return error;
        m_head = *m_headp;
    }

    txd_upper = txd_lower = 0;

    /*
     * Capture the first descriptor index; this descriptor
     * will have the index of the EOP which is the only one
     * that now gets a DONE bit writeback.
     */
    first = tdata->next_avail_tx_desc;
    tx_buffer = &tdata->tx_buf[first];
    tx_buffer_mapped = tx_buffer;
    map = tx_buffer->map;

    maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
    KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
    if (maxsegs > EMX_MAX_SCATTER)
        maxsegs = EMX_MAX_SCATTER;

    error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
        segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
    if (error) {
        m_freem(*m_headp);
        *m_headp = NULL;
        return error;
    }
    bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

    m_head = *m_headp;
    tdata->tx_nsegs += nsegs;
    *segs_used += nsegs;

    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        /* TSO will consume one TX desc */
        i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
        tdata->tx_nsegs += i;
        *segs_used += i;
    } else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
        /* TX csum offloading will consume one TX desc */
        i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
        tdata->tx_nsegs += i;
        *segs_used += i;
    }

    /* Handle VLAN tag */
    if (m_head->m_flags & M_VLANTAG) {
        /* Set the vlan id. */
        txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
        /* Tell hardware to add tag */
        txd_lower |= htole32(E1000_TXD_CMD_VLE);
    }

    i = tdata->next_avail_tx_desc;

    /* Set up our transmit descriptors */
    for (j = 0; j < nsegs; j++) {
        tx_buffer = &tdata->tx_buf[i];
        ctxd = &tdata->tx_desc_base[i];

        ctxd->buffer_addr = htole64(segs[j].ds_addr);
        ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
            txd_lower | segs[j].ds_len);
        ctxd->upper.data = htole32(txd_upper);

        last = i;
        if (++i == tdata->num_tx_desc)
            i = 0;
    }

    tdata->next_avail_tx_desc = i;

    KKASSERT(tdata->num_tx_desc_avail > nsegs);
    tdata->num_tx_desc_avail -= nsegs;
    tdata->tx_nmbuf++;

    tx_buffer->m_head = m_head;
    tx_buffer_mapped->map = tx_buffer->map;
    tx_buffer->map = map;

    if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
        tdata->tx_nsegs = 0;

        /*
         * Report Status (RS) is turned on
         * every tx_intr_nsegs descriptors.
         */
        cmd = E1000_TXD_CMD_RS;

        /*
         * Keep track of the descriptor, which will
         * be written back by hardware.
         */
        tdata->tx_dd[tdata->tx_dd_tail] = last;
        EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
        KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
    }

    /*
     * Last Descriptor of Packet needs End Of Packet (EOP)
     */
    ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

    /*
     * Defer TDT updating, until enough descriptors are setup
     */
    *idx = i;

#ifdef EMX_TSS_DEBUG
    tdata->tx_pkts++;
#endif

    return (0);
}

static void
emx_set_promisc(struct emx_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    uint32_t reg_rctl;

    reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

    if (ifp->if_flags & IFF_PROMISC) {
        reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
        /* Turn this on if you want to see bad packets */
        if (emx_debug_sbp)
            reg_rctl |= E1000_RCTL_SBP;
        E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
    } else if (ifp->if_flags & IFF_ALLMULTI) {
        reg_rctl |= E1000_RCTL_MPE;
        reg_rctl &= ~E1000_RCTL_UPE;
        E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
    }
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
    uint32_t reg_rctl;

    reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

    reg_rctl &= ~E1000_RCTL_UPE;
    reg_rctl &= ~E1000_RCTL_MPE;
    reg_rctl &= ~E1000_RCTL_SBP;
    E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

static void
emx_set_multi(struct emx_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct ifmultiaddr *ifma;
    uint32_t reg_rctl = 0;
    uint8_t *mta;
    int mcnt = 0;

    mta = sc->mta;
    bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;

        if (mcnt == EMX_MCAST_ADDR_MAX)
            break;

        bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
            &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
        mcnt++;
    }

    if (mcnt >= EMX_MCAST_ADDR_MAX) {
        reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
        reg_rctl |= E1000_RCTL_MPE;
        E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
    } else {
        e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
    }
}

/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
    struct emx_softc *sc = xsc;
    struct ifnet *ifp = &sc->arpcom.ac_if;

    lwkt_serialize_enter(&sc->main_serialize);

    emx_update_link_status(sc);
    emx_update_stats(sc);

    /* Reset LAA into RAR[0] on 82571 */
    if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
        e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

    if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
        emx_print_hw_stats(sc);

    emx_smartspeed(sc);

    callout_reset(&sc->timer, hz, emx_timer, sc);

    lwkt_serialize_exit(&sc->main_serialize);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
    struct e1000_hw *hw = &sc->hw;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    device_t dev = sc->dev;
    uint32_t link_check = 0;

    /* Get the cached link value or read phy for real */
    switch (hw->phy.media_type) {
    case e1000_media_type_copper:
        if (hw->mac.get_link_status) {
            /* Do the work to read phy */
            e1000_check_for_link(hw);
            link_check = !hw->mac.get_link_status;
            if (link_check) /* ESB2 fix */
                e1000_cfg_on_link_up(hw);
        } else {
            link_check = TRUE;
        }
        break;

    case e1000_media_type_fiber:
        e1000_check_for_link(hw);
        link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
        break;

    case e1000_media_type_internal_serdes:
        e1000_check_for_link(hw);
        link_check = sc->hw.mac.serdes_has_link;
        break;

    case e1000_media_type_unknown:
    default:
        break;
    }

    /* Now check for a transition */
    if (link_check && sc->link_active == 0) {
        e1000_get_speed_and_duplex(hw, &sc->link_speed,
            &sc->link_duplex);

        /*
         * Check if we should enable/disable SPEED_MODE bit on
         * 82571EB/82572EI
         */
        if (sc->link_speed != SPEED_1000 &&
            (hw->mac.type == e1000_82571 ||
             hw->mac.type == e1000_82572)) {
            int tarc0;

            tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
            tarc0 &= ~EMX_TARC_SPEED_MODE;
            E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
        }
        if (bootverbose) {
            char flowctrl[IFM_ETH_FC_STRLEN];

            e1000_fc2str(hw->fc.current_mode, flowctrl,
                sizeof(flowctrl));
            device_printf(dev, "Link is up %d Mbps %s, "
                "Flow control: %s\n",
                sc->link_speed,
                (sc->link_duplex == FULL_DUPLEX) ?
                "Full Duplex" : "Half Duplex",
                flowctrl);
        }
        if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
            e1000_force_flowctrl(hw, sc->ifm_flowctrl);
        sc->link_active = 1;
        sc->smartspeed = 0;
        ifp->if_baudrate = sc->link_speed * 1000000;
        ifp->if_link_state = LINK_STATE_UP;
        if_link_state_change(ifp);
    } else if (!link_check && sc->link_active == 1) {
        ifp->if_baudrate = sc->link_speed = 0;
        sc->link_duplex = 0;
        if (bootverbose)
            device_printf(dev, "Link is Down\n");
        sc->link_active = 0;
        ifp->if_link_state = LINK_STATE_DOWN;
        if_link_state_change(ifp);
    }
}

static void
emx_stop(struct emx_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    int i;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    emx_disable_intr(sc);

    callout_stop(&sc->timer);

    ifp->if_flags &= ~IFF_RUNNING;
    for (i = 0; i < sc->tx_ring_cnt; ++i) {
        struct emx_txdata *tdata = &sc->tx_data[i];

        ifsq_clr_oactive(tdata->ifsq);
        ifsq_watchdog_stop(&tdata->tx_watchdog);
        tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;

        tdata->tx_running = 0;
        callout_stop(&tdata->tx_gc_timer);
    }

    /*
     * Disable multiple receive queues.
     *
     * NOTE:
     * We should disable multiple receive queues before
     * resetting the hardware.
     */
    E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

    e1000_reset_hw(&sc->hw);
    E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

    for (i = 0; i < sc->tx_ring_cnt; ++i)
        emx_free_tx_ring(&sc->tx_data[i]);
    for (i = 0; i < sc->rx_ring_cnt; ++i)
        emx_free_rx_ring(&sc->rx_data[i]);
}
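
/*
 * Reset and (re)program the chip: packet buffer split, flow control
 * water marks and a fresh e1000_init_hw().  As an illustrative example
 * of the water mark arithmetic below (assuming a 32K RX packet buffer
 * and a 1518-byte max frame): rx_buffer_size = 32 * 1024 = 32768,
 * high_water = 32768 - roundup2(1518, 1024) = 30720, and
 * low_water = 30720 - 1500 = 29220.
 */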
2053 * - Low water mark works best when it is very near the high water mark. 2054 * This allows the receiver to restart by sending XON when it has 2055 * drained a bit. Here we use an arbitrary value of 1500 which will 2056 * restart after one full frame is pulled from the buffer. There 2057 * could be several smaller frames in the buffer and if so they will 2058 * not trigger the XON until their total number reduces the buffer 2059 * by 1500. 2060 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 2061 */ 2062 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10; 2063 2064 sc->hw.fc.high_water = rx_buffer_size - 2065 roundup2(sc->hw.mac.max_frame_size, 1024); 2066 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500; 2067 2068 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME; 2069 sc->hw.fc.send_xon = TRUE; 2070 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl); 2071 2072 /* 2073 * Device-specific overrides/settings 2074 */ 2075 if (sc->hw.mac.type == e1000_pch_lpt || 2076 sc->hw.mac.type == e1000_pch_spt) { 2077 sc->hw.fc.high_water = 0x5C20; 2078 sc->hw.fc.low_water = 0x5048; 2079 sc->hw.fc.pause_time = 0x0650; 2080 sc->hw.fc.refresh_time = 0x0400; 2081 /* Jumbos need adjusted PBA */ 2082 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) 2083 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12); 2084 else 2085 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26); 2086 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2087 sc->hw.fc.pause_time = 0xFFFF; 2088 } 2089 2090 /* Issue a global reset */ 2091 e1000_reset_hw(&sc->hw); 2092 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 2093 emx_disable_aspm(sc); 2094 2095 if (e1000_init_hw(&sc->hw) < 0) { 2096 device_printf(dev, "Hardware Initialization Failed\n"); 2097 return (EIO); 2098 } 2099 2100 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 2101 e1000_get_phy_info(&sc->hw); 2102 e1000_check_for_link(&sc->hw); 2103 2104 return (0); 2105 } 2106 2107 static void 2108 emx_setup_ifp(struct emx_softc *sc) 2109 { 2110 struct ifnet *ifp = &sc->arpcom.ac_if; 2111 int i; 2112 2113 if_initname(ifp, device_get_name(sc->dev), 2114 device_get_unit(sc->dev)); 2115 ifp->if_softc = sc; 2116 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2117 ifp->if_init = emx_init; 2118 ifp->if_ioctl = emx_ioctl; 2119 ifp->if_start = emx_start; 2120 #ifdef IFPOLL_ENABLE 2121 ifp->if_npoll = emx_npoll; 2122 #endif 2123 ifp->if_serialize = emx_serialize; 2124 ifp->if_deserialize = emx_deserialize; 2125 ifp->if_tryserialize = emx_tryserialize; 2126 #ifdef INVARIANTS 2127 ifp->if_serialize_assert = emx_serialize_assert; 2128 #endif 2129 2130 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc; 2131 2132 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1); 2133 ifq_set_ready(&ifp->if_snd); 2134 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 2135 2136 ifp->if_mapsubq = ifq_mapsubq_modulo; 2137 ifq_set_subq_divisor(&ifp->if_snd, 1); 2138 2139 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2140 2141 ifp->if_capabilities = IFCAP_HWCSUM | 2142 IFCAP_VLAN_HWTAGGING | 2143 IFCAP_VLAN_MTU | 2144 IFCAP_TSO; 2145 if (sc->rx_ring_cnt > 1) 2146 ifp->if_capabilities |= IFCAP_RSS; 2147 ifp->if_capenable = ifp->if_capabilities; 2148 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2149 2150 /* 2151 * Tell the upper layer(s) we support long frames.
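 *
 * A hedged note for illustration: with an 802.1Q tag the Ethernet
 * header grows from 14 to 18 bytes, i.e. sizeof(struct
 * ether_vlan_header), which is why ifi_hdrlen is set to that size
 * below so the upper layers reserve enough link-level header space.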
2152 */ 2153 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2154 2155 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2156 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2157 struct emx_txdata *tdata = &sc->tx_data[i]; 2158 2159 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2160 ifsq_set_priv(ifsq, tdata); 2161 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2162 tdata->ifsq = ifsq; 2163 2164 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2165 } 2166 2167 /* 2168 * Specify the media types supported by this sc and register 2169 * callbacks to update media and link information 2170 */ 2171 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2172 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2173 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2174 0, NULL); 2175 } else { 2176 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2177 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2178 0, NULL); 2179 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2180 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2181 0, NULL); 2182 if (sc->hw.phy.type != e1000_phy_ife) { 2183 ifmedia_add(&sc->media, 2184 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2185 } 2186 } 2187 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2188 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl); 2189 } 2190 2191 /* 2192 * Workaround for SmartSpeed on 82541 and 82547 controllers 2193 */ 2194 static void 2195 emx_smartspeed(struct emx_softc *sc) 2196 { 2197 uint16_t phy_tmp; 2198 2199 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2200 sc->hw.mac.autoneg == 0 || 2201 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2202 return; 2203 2204 if (sc->smartspeed == 0) { 2205 /* 2206 * If Master/Slave config fault is asserted twice, 2207 * we assume back-to-back 2208 */ 2209 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2210 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2211 return; 2212 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2213 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2214 e1000_read_phy_reg(&sc->hw, 2215 PHY_1000T_CTRL, &phy_tmp); 2216 if (phy_tmp & CR_1000T_MS_ENABLE) { 2217 phy_tmp &= ~CR_1000T_MS_ENABLE; 2218 e1000_write_phy_reg(&sc->hw, 2219 PHY_1000T_CTRL, phy_tmp); 2220 sc->smartspeed++; 2221 if (sc->hw.mac.autoneg && 2222 !e1000_phy_setup_autoneg(&sc->hw) && 2223 !e1000_read_phy_reg(&sc->hw, 2224 PHY_CONTROL, &phy_tmp)) { 2225 phy_tmp |= MII_CR_AUTO_NEG_EN | 2226 MII_CR_RESTART_AUTO_NEG; 2227 e1000_write_phy_reg(&sc->hw, 2228 PHY_CONTROL, phy_tmp); 2229 } 2230 } 2231 } 2232 return; 2233 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2234 /* If still no link, perhaps using 2/3 pair cable */ 2235 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2236 phy_tmp |= CR_1000T_MS_ENABLE; 2237 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2238 if (sc->hw.mac.autoneg && 2239 !e1000_phy_setup_autoneg(&sc->hw) && 2240 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2241 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2242 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2243 } 2244 } 2245 2246 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2247 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2248 sc->smartspeed = 0; 2249 } 2250 2251 static int 2252 emx_create_tx_ring(struct emx_txdata *tdata) 2253 { 2254 device_t dev = tdata->sc->dev; 2255 struct emx_txbuf *tx_buffer; 2256 int error, i, tsize, ntxd; 2257 2258 /* 2259 * 
Validate number of transmit descriptors. It must not exceed 2260 * hardware maximum, and must be a multiple of E1000_DBA_ALIGN. 2261 */ 2262 ntxd = device_getenv_int(dev, "txd", emx_txd); 2263 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2264 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2265 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2266 EMX_DEFAULT_TXD, ntxd); 2267 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2268 } else { 2269 tdata->num_tx_desc = ntxd; 2270 } 2271 2272 /* 2273 * Allocate Transmit Descriptor ring 2274 */ 2275 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2276 EMX_DBA_ALIGN); 2277 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2278 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2279 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2280 &tdata->tx_desc_paddr); 2281 if (tdata->tx_desc_base == NULL) { 2282 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2283 return ENOMEM; 2284 } 2285 2286 tsize = __VM_CACHELINE_ALIGN( 2287 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2288 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO); 2289 2290 /* 2291 * Create DMA tags for tx buffers 2292 */ 2293 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2294 1, 0, /* alignment, bounds */ 2295 BUS_SPACE_MAXADDR, /* lowaddr */ 2296 BUS_SPACE_MAXADDR, /* highaddr */ 2297 NULL, NULL, /* filter, filterarg */ 2298 EMX_TSO_SIZE, /* maxsize */ 2299 EMX_MAX_SCATTER, /* nsegments */ 2300 EMX_MAX_SEGSIZE, /* maxsegsize */ 2301 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2302 BUS_DMA_ONEBPAGE, /* flags */ 2303 &tdata->txtag); 2304 if (error) { 2305 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2306 kfree(tdata->tx_buf, M_DEVBUF); 2307 tdata->tx_buf = NULL; 2308 return error; 2309 } 2310 2311 /* 2312 * Create DMA maps for tx buffers 2313 */ 2314 for (i = 0; i < tdata->num_tx_desc; i++) { 2315 tx_buffer = &tdata->tx_buf[i]; 2316 2317 error = bus_dmamap_create(tdata->txtag, 2318 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2319 &tx_buffer->map); 2320 if (error) { 2321 device_printf(dev, "Unable to create TX DMA map\n"); 2322 emx_destroy_tx_ring(tdata, i); 2323 return error; 2324 } 2325 } 2326 2327 /* 2328 * Setup TX parameters 2329 */ 2330 tdata->spare_tx_desc = EMX_TX_SPARE; 2331 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2332 2333 /* 2334 * Keep the following relationship between spare_tx_desc, oact_tx_desc 2335 * and tx_intr_nsegs: 2336 * (spare_tx_desc + EMX_TX_RESERVED) <= 2337 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2338 */ 2339 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2340 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2341 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2342 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2343 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2344 2345 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2346 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2347 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2348 2349 /* 2350 * Pull up an extra 4 bytes into the first data segment for TSO, see: 2351 * 82571/82572 specification update errata #7 2352 * 2353 * Same applies to I217 (and maybe I218 and I219). 2354 * 2355 * NOTE: 2356 * 4 bytes instead of the 2 bytes mentioned in the errata 2357 * are pulled, mainly to keep the rest of the data properly aligned.
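 *
 * A sketch of what EMX_TXFLAG_TSO_PULLEX amounts to (the actual
 * pull-up happens in emx_tso_pullup(); the exact expression below is
 * an assumption for illustration, not datasheet text):
 *
 *	m_pullup(m, csum_lhlen + csum_iphlen + csum_thlen + 4);
 *
 * i.e. the headers plus the first 4 payload bytes end up in one
 * contiguous segment, keeping the remainder 4-byte aligned.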
2358 */ 2359 if (tdata->sc->hw.mac.type == e1000_82571 || 2360 tdata->sc->hw.mac.type == e1000_82572 || 2361 tdata->sc->hw.mac.type == e1000_pch_lpt || 2362 tdata->sc->hw.mac.type == e1000_pch_spt) 2363 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2364 2365 return (0); 2366 } 2367 2368 static void 2369 emx_init_tx_ring(struct emx_txdata *tdata) 2370 { 2371 /* Clear the old ring contents */ 2372 bzero(tdata->tx_desc_base, 2373 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2374 2375 /* Reset state */ 2376 tdata->next_avail_tx_desc = 0; 2377 tdata->next_tx_to_clean = 0; 2378 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2379 tdata->tx_nmbuf = 0; 2380 tdata->tx_running = 0; 2381 2382 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2383 if (tdata->sc->tx_ring_inuse > 1) { 2384 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2385 if (bootverbose) { 2386 if_printf(&tdata->sc->arpcom.ac_if, 2387 "TX %d force ctx setup\n", tdata->idx); 2388 } 2389 } 2390 } 2391 2392 static void 2393 emx_init_tx_unit(struct emx_softc *sc) 2394 { 2395 uint32_t tctl, tarc, tipg = 0, txdctl; 2396 int i; 2397 2398 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2399 struct emx_txdata *tdata = &sc->tx_data[i]; 2400 uint64_t bus_addr; 2401 2402 /* Setup the Base and Length of the Tx Descriptor Ring */ 2403 bus_addr = tdata->tx_desc_paddr; 2404 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2405 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2406 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2407 (uint32_t)(bus_addr >> 32)); 2408 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2409 (uint32_t)bus_addr); 2410 /* Setup the HW Tx Head and Tail descriptor pointers */ 2411 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2412 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2413 } 2414 2415 /* Set the default values for the Tx Inter Packet Gap timer */ 2416 switch (sc->hw.mac.type) { 2417 case e1000_80003es2lan: 2418 tipg = DEFAULT_82543_TIPG_IPGR1; 2419 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2420 E1000_TIPG_IPGR2_SHIFT; 2421 break; 2422 2423 default: 2424 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2425 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2426 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2427 else 2428 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2429 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2430 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2431 break; 2432 } 2433 2434 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2435 2436 /* NOTE: 0 is not allowed for TIDV */ 2437 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2438 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2439 2440 /* 2441 * Errata workaround (obtained from Linux). This is necessary 2442 * to make multiple TX queues work on 82574. 2443 * XXX can't find it in any published errata though. 
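 *
 * What the two statements below actually do is mirror the TXDCTL(0)
 * configuration into TXDCTL(1), so the second TX queue runs with the
 * same descriptor prefetch/write-back thresholds as the first.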
2444 */ 2445 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0)); 2446 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl); 2447 2448 if (sc->hw.mac.type == e1000_82571 || 2449 sc->hw.mac.type == e1000_82572) { 2450 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2451 tarc |= EMX_TARC_SPEED_MODE; 2452 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2453 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2454 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2455 tarc |= 1; 2456 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2457 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2458 tarc |= 1; 2459 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2460 } 2461 2462 /* Program the Transmit Control Register */ 2463 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2464 tctl &= ~E1000_TCTL_CT; 2465 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2466 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2467 tctl |= E1000_TCTL_MULR; 2468 2469 /* This write will effectively turn on the transmit unit. */ 2470 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2471 2472 if (sc->hw.mac.type == e1000_82571 || 2473 sc->hw.mac.type == e1000_82572 || 2474 sc->hw.mac.type == e1000_80003es2lan) { 2475 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2476 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2477 tarc &= ~(1 << 28); 2478 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2479 } 2480 2481 if (sc->tx_ring_inuse > 1) { 2482 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2483 tarc &= ~EMX_TARC_COUNT_MASK; 2484 tarc |= 1; 2485 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2486 2487 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2488 tarc &= ~EMX_TARC_COUNT_MASK; 2489 tarc |= 1; 2490 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2491 } 2492 } 2493 2494 static void 2495 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc) 2496 { 2497 struct emx_txbuf *tx_buffer; 2498 int i; 2499 2500 /* Free Transmit Descriptor ring */ 2501 if (tdata->tx_desc_base) { 2502 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap); 2503 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base, 2504 tdata->tx_desc_dmap); 2505 bus_dma_tag_destroy(tdata->tx_desc_dtag); 2506 2507 tdata->tx_desc_base = NULL; 2508 } 2509 2510 if (tdata->tx_buf == NULL) 2511 return; 2512 2513 for (i = 0; i < ndesc; i++) { 2514 tx_buffer = &tdata->tx_buf[i]; 2515 2516 KKASSERT(tx_buffer->m_head == NULL); 2517 bus_dmamap_destroy(tdata->txtag, tx_buffer->map); 2518 } 2519 bus_dma_tag_destroy(tdata->txtag); 2520 2521 kfree(tdata->tx_buf, M_DEVBUF); 2522 tdata->tx_buf = NULL; 2523 } 2524 2525 /* 2526 * The offload context needs to be set when we transfer the first 2527 * packet of a particular protocol (TCP/UDP). This routine has been 2528 * enhanced to deal with inserted VLAN headers. 2529 * 2530 * If the new packet's ether header length, ip header length and 2531 * csum offloading type are the same as the previous packet's, we 2532 * should avoid allocating a new csum context descriptor; mainly to 2533 * take advantage of the pipeline effect of the TX data read request. 2534 * 2535 * This function returns the number of TX descriptors allocated for 2536 * csum context.
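 *
 * Concretely (an illustrative scenario, not a hardware requirement):
 * for back-to-back TCP segments that share the same 14-byte Ethernet
 * and 20-byte IP headers, only the first call consumes a context
 * descriptor and returns 1; later calls simply replay the cached
 * txd_upper/txd_lower values and return 0.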
2537 */ 2538 static int 2539 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp, 2540 uint32_t *txd_upper, uint32_t *txd_lower) 2541 { 2542 struct e1000_context_desc *TXD; 2543 int curr_txd, ehdrlen, csum_flags; 2544 uint32_t cmd, hdr_len, ip_hlen; 2545 2546 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2547 ip_hlen = mp->m_pkthdr.csum_iphlen; 2548 ehdrlen = mp->m_pkthdr.csum_lhlen; 2549 2550 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 2551 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen && 2552 tdata->csum_flags == csum_flags) { 2553 /* 2554 * Same csum offload context as the previous packets; 2555 * just return. 2556 */ 2557 *txd_upper = tdata->csum_txd_upper; 2558 *txd_lower = tdata->csum_txd_lower; 2559 return 0; 2560 } 2561 2562 /* 2563 * Setup a new csum offload context. 2564 */ 2565 2566 curr_txd = tdata->next_avail_tx_desc; 2567 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 2568 2569 cmd = 0; 2570 2571 /* Setup of IP header checksum. */ 2572 if (csum_flags & CSUM_IP) { 2573 /* 2574 * Start offset for header checksum calculation. 2575 * End offset for header checksum calculation. 2576 * Offset of place to put the checksum. 2577 */ 2578 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2579 TXD->lower_setup.ip_fields.ipcse = 2580 htole16(ehdrlen + ip_hlen - 1); 2581 TXD->lower_setup.ip_fields.ipcso = 2582 ehdrlen + offsetof(struct ip, ip_sum); 2583 cmd |= E1000_TXD_CMD_IP; 2584 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2585 } 2586 hdr_len = ehdrlen + ip_hlen; 2587 2588 if (csum_flags & CSUM_TCP) { 2589 /* 2590 * Start offset for payload checksum calculation. 2591 * End offset for payload checksum calculation. 2592 * Offset of place to put the checksum. 2593 */ 2594 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2595 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2596 TXD->upper_setup.tcp_fields.tucso = 2597 hdr_len + offsetof(struct tcphdr, th_sum); 2598 cmd |= E1000_TXD_CMD_TCP; 2599 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2600 } else if (csum_flags & CSUM_UDP) { 2601 /* 2602 * Start offset for header checksum calculation. 2603 * End offset for header checksum calculation. 2604 * Offset of place to put the checksum. 
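 *
 * Worked example (assuming an untagged IPv4/UDP frame): ehdrlen = 14
 * and ip_hlen = 20 give tucss = 34 and tucso = 34 +
 * offsetof(struct udphdr, uh_sum) = 40, while tucse = 0 means the
 * checksum range runs to the end of the packet.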
2605 */ 2606 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2607 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2608 TXD->upper_setup.tcp_fields.tucso = 2609 hdr_len + offsetof(struct udphdr, uh_sum); 2610 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2611 } 2612 2613 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2614 E1000_TXD_DTYP_D; /* Data descr */ 2615 2616 /* Save the information for this csum offloading context */ 2617 tdata->csum_lhlen = ehdrlen; 2618 tdata->csum_iphlen = ip_hlen; 2619 tdata->csum_flags = csum_flags; 2620 tdata->csum_txd_upper = *txd_upper; 2621 tdata->csum_txd_lower = *txd_lower; 2622 2623 TXD->tcp_seg_setup.data = htole32(0); 2624 TXD->cmd_and_length = 2625 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2626 2627 if (++curr_txd == tdata->num_tx_desc) 2628 curr_txd = 0; 2629 2630 KKASSERT(tdata->num_tx_desc_avail > 0); 2631 tdata->num_tx_desc_avail--; 2632 2633 tdata->next_avail_tx_desc = curr_txd; 2634 return 1; 2635 } 2636 2637 static void 2638 emx_txeof(struct emx_txdata *tdata) 2639 { 2640 struct emx_txbuf *tx_buffer; 2641 int first, num_avail; 2642 2643 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2644 return; 2645 2646 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2647 return; 2648 2649 num_avail = tdata->num_tx_desc_avail; 2650 first = tdata->next_tx_to_clean; 2651 2652 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2653 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2654 struct e1000_tx_desc *tx_desc; 2655 2656 tx_desc = &tdata->tx_desc_base[dd_idx]; 2657 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2658 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2659 2660 if (++dd_idx == tdata->num_tx_desc) 2661 dd_idx = 0; 2662 2663 while (first != dd_idx) { 2664 logif(pkt_txclean); 2665 2666 KKASSERT(num_avail < tdata->num_tx_desc); 2667 num_avail++; 2668 2669 tx_buffer = &tdata->tx_buf[first]; 2670 if (tx_buffer->m_head) 2671 emx_free_txbuf(tdata, tx_buffer); 2672 2673 if (++first == tdata->num_tx_desc) 2674 first = 0; 2675 } 2676 } else { 2677 break; 2678 } 2679 } 2680 tdata->next_tx_to_clean = first; 2681 tdata->num_tx_desc_avail = num_avail; 2682 2683 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2684 tdata->tx_dd_head = 0; 2685 tdata->tx_dd_tail = 0; 2686 } 2687 2688 if (!EMX_IS_OACTIVE(tdata)) { 2689 ifsq_clr_oactive(tdata->ifsq); 2690 2691 /* All clean, turn off the timer */ 2692 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2693 tdata->tx_watchdog.wd_timer = 0; 2694 } 2695 tdata->tx_running = EMX_TX_RUNNING; 2696 } 2697 2698 static void 2699 emx_tx_collect(struct emx_txdata *tdata, boolean_t gc) 2700 { 2701 struct emx_txbuf *tx_buffer; 2702 int tdh, first, num_avail, dd_idx = -1; 2703 2704 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2705 return; 2706 2707 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx)); 2708 if (tdh == tdata->next_tx_to_clean) { 2709 if (gc && tdata->tx_nmbuf > 0) 2710 tdata->tx_running = EMX_TX_RUNNING; 2711 return; 2712 } 2713 if (gc) 2714 tdata->tx_gc++; 2715 2716 if (tdata->tx_dd_head != tdata->tx_dd_tail) 2717 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2718 2719 num_avail = tdata->num_tx_desc_avail; 2720 first = tdata->next_tx_to_clean; 2721 2722 while (first != tdh) { 2723 logif(pkt_txclean); 2724 2725 KKASSERT(num_avail < tdata->num_tx_desc); 2726 num_avail++; 2727 2728 tx_buffer = &tdata->tx_buf[first]; 2729 if (tx_buffer->m_head) 2730 emx_free_txbuf(tdata, tx_buffer); 2731 2732 if (first == dd_idx) { 2733 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2734 if (tdata->tx_dd_head == 
tdata->tx_dd_tail) { 2735 tdata->tx_dd_head = 0; 2736 tdata->tx_dd_tail = 0; 2737 dd_idx = -1; 2738 } else { 2739 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2740 } 2741 } 2742 2743 if (++first == tdata->num_tx_desc) 2744 first = 0; 2745 } 2746 tdata->next_tx_to_clean = first; 2747 tdata->num_tx_desc_avail = num_avail; 2748 2749 if (!EMX_IS_OACTIVE(tdata)) { 2750 ifsq_clr_oactive(tdata->ifsq); 2751 2752 /* All clean, turn off the timer */ 2753 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2754 tdata->tx_watchdog.wd_timer = 0; 2755 } 2756 if (!gc || tdata->tx_nmbuf > 0) 2757 tdata->tx_running = EMX_TX_RUNNING; 2758 } 2759 2760 /* 2761 * When link is lost sometimes there is work still in the TX ring 2762 * which will result in a watchdog; rather than allow that, do an 2763 * attempted cleanup and then reinit here. Note that this has been 2764 * seen mostly with fiber adapters. 2765 */ 2766 static void 2767 emx_tx_purge(struct emx_softc *sc) 2768 { 2769 int i; 2770 2771 if (sc->link_active) 2772 return; 2773 2774 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2775 struct emx_txdata *tdata = &sc->tx_data[i]; 2776 2777 if (tdata->tx_watchdog.wd_timer) { 2778 emx_tx_collect(tdata, FALSE); 2779 if (tdata->tx_watchdog.wd_timer) { 2780 if_printf(&sc->arpcom.ac_if, 2781 "Link lost, TX pending, reinit\n"); 2782 emx_init(sc); 2783 return; 2784 } 2785 } 2786 } 2787 } 2788 2789 static int 2790 emx_newbuf(struct emx_rxdata *rdata, int i, int init) 2791 { 2792 struct mbuf *m; 2793 bus_dma_segment_t seg; 2794 bus_dmamap_t map; 2795 struct emx_rxbuf *rx_buffer; 2796 int error, nseg; 2797 2798 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2799 if (m == NULL) { 2800 if (init) { 2801 if_printf(&rdata->sc->arpcom.ac_if, 2802 "Unable to allocate RX mbuf\n"); 2803 } 2804 return (ENOBUFS); 2805 } 2806 m->m_len = m->m_pkthdr.len = MCLBYTES; 2807 2808 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 2809 m_adj(m, ETHER_ALIGN); 2810 2811 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2812 rdata->rx_sparemap, m, 2813 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2814 if (error) { 2815 m_freem(m); 2816 if (init) { 2817 if_printf(&rdata->sc->arpcom.ac_if, 2818 "Unable to load RX mbuf\n"); 2819 } 2820 return (error); 2821 } 2822 2823 rx_buffer = &rdata->rx_buf[i]; 2824 if (rx_buffer->m_head != NULL) 2825 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2826 2827 map = rx_buffer->map; 2828 rx_buffer->map = rdata->rx_sparemap; 2829 rdata->rx_sparemap = map; 2830 2831 rx_buffer->m_head = m; 2832 rx_buffer->paddr = seg.ds_addr; 2833 2834 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2835 return (0); 2836 } 2837 2838 static int 2839 emx_create_rx_ring(struct emx_rxdata *rdata) 2840 { 2841 device_t dev = rdata->sc->dev; 2842 struct emx_rxbuf *rx_buffer; 2843 int i, error, rsize, nrxd; 2844 2845 /* 2846 * Validate number of receive descriptors. It must not exceed 2847 * hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
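 *
 * For example, assuming the 16-byte extended RX descriptor and a
 * 128-byte EMX_DBA_ALIGN, (nrxd * 16) % 128 == 0 means nrxd must be
 * a multiple of 8; the exact sizes are assumptions, and the
 * sizeof-based check below is authoritative.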
2848 */ 2849 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2850 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2851 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2852 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2853 EMX_DEFAULT_RXD, nrxd); 2854 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2855 } else { 2856 rdata->num_rx_desc = nrxd; 2857 } 2858 2859 /* 2860 * Allocate Receive Descriptor ring 2861 */ 2862 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2863 EMX_DBA_ALIGN); 2864 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2865 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2866 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2867 &rdata->rx_desc_paddr); 2868 if (rdata->rx_desc == NULL) { 2869 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2870 return ENOMEM; 2871 } 2872 2873 rsize = __VM_CACHELINE_ALIGN( 2874 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2875 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2876 2877 /* 2878 * Create DMA tag for rx buffers 2879 */ 2880 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2881 1, 0, /* alignment, bounds */ 2882 BUS_SPACE_MAXADDR, /* lowaddr */ 2883 BUS_SPACE_MAXADDR, /* highaddr */ 2884 NULL, NULL, /* filter, filterarg */ 2885 MCLBYTES, /* maxsize */ 2886 1, /* nsegments */ 2887 MCLBYTES, /* maxsegsize */ 2888 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2889 &rdata->rxtag); 2890 if (error) { 2891 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2892 kfree(rdata->rx_buf, M_DEVBUF); 2893 rdata->rx_buf = NULL; 2894 return error; 2895 } 2896 2897 /* 2898 * Create spare DMA map for rx buffers 2899 */ 2900 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2901 &rdata->rx_sparemap); 2902 if (error) { 2903 device_printf(dev, "Unable to create spare RX DMA map\n"); 2904 bus_dma_tag_destroy(rdata->rxtag); 2905 kfree(rdata->rx_buf, M_DEVBUF); 2906 rdata->rx_buf = NULL; 2907 return error; 2908 } 2909 2910 /* 2911 * Create DMA maps for rx buffers 2912 */ 2913 for (i = 0; i < rdata->num_rx_desc; i++) { 2914 rx_buffer = &rdata->rx_buf[i]; 2915 2916 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2917 &rx_buffer->map); 2918 if (error) { 2919 device_printf(dev, "Unable to create RX DMA map\n"); 2920 emx_destroy_rx_ring(rdata, i); 2921 return error; 2922 } 2923 } 2924 return (0); 2925 } 2926 2927 static void 2928 emx_free_rx_ring(struct emx_rxdata *rdata) 2929 { 2930 int i; 2931 2932 for (i = 0; i < rdata->num_rx_desc; i++) { 2933 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 2934 2935 if (rx_buffer->m_head != NULL) { 2936 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2937 m_freem(rx_buffer->m_head); 2938 rx_buffer->m_head = NULL; 2939 } 2940 } 2941 2942 if (rdata->fmp != NULL) 2943 m_freem(rdata->fmp); 2944 rdata->fmp = NULL; 2945 rdata->lmp = NULL; 2946 } 2947 2948 static void 2949 emx_free_tx_ring(struct emx_txdata *tdata) 2950 { 2951 int i; 2952 2953 for (i = 0; i < tdata->num_tx_desc; i++) { 2954 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i]; 2955 2956 if (tx_buffer->m_head != NULL) 2957 emx_free_txbuf(tdata, tx_buffer); 2958 } 2959 2960 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX; 2961 2962 tdata->csum_flags = 0; 2963 tdata->csum_lhlen = 0; 2964 tdata->csum_iphlen = 0; 2965 tdata->csum_thlen = 0; 2966 tdata->csum_mss = 0; 2967 tdata->csum_pktlen = 0; 2968 2969 tdata->tx_dd_head = 0; 2970 tdata->tx_dd_tail = 0; 2971 tdata->tx_nsegs = 0; 2972 } 2973 2974 static int 2975 emx_init_rx_ring(struct emx_rxdata *rdata) 2976 { 2977 int i, error; 
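	/*
	 * NOTE: the loop below calls emx_newbuf() with init=1, so mbuf
	 * allocation uses M_WAITOK and may block rather than fail; the
	 * interrupt-time refill in emx_rxeof() passes init=0 (M_NOWAIT)
	 * instead.
	 */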
2978 2979 /* Reset descriptor ring */ 2980 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 2981 2982 /* Allocate new ones. */ 2983 for (i = 0; i < rdata->num_rx_desc; i++) { 2984 error = emx_newbuf(rdata, i, 1); 2985 if (error) 2986 return (error); 2987 } 2988 2989 /* Setup our descriptor pointers */ 2990 rdata->next_rx_desc_to_check = 0; 2991 2992 return (0); 2993 } 2994 2995 static void 2996 emx_init_rx_unit(struct emx_softc *sc) 2997 { 2998 struct ifnet *ifp = &sc->arpcom.ac_if; 2999 uint64_t bus_addr; 3000 uint32_t rctl, itr, rfctl; 3001 int i; 3002 3003 /* 3004 * Make sure receives are disabled while setting 3005 * up the descriptor ring 3006 */ 3007 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 3008 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 3009 3010 /* 3011 * Set the interrupt throttling rate. Value is calculated 3012 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 3013 */ 3014 if (sc->int_throttle_ceil) 3015 itr = 1000000000 / 256 / sc->int_throttle_ceil; 3016 else 3017 itr = 0; 3018 emx_set_itr(sc, itr); 3019 3020 /* Use extended RX descriptor */ 3021 rfctl = E1000_RFCTL_EXTEN; 3022 3023 /* Disable accelerated acknowledgment */ 3024 if (sc->hw.mac.type == e1000_82574) 3025 rfctl |= E1000_RFCTL_ACK_DIS; 3026 3027 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 3028 3029 /* 3030 * Receive Checksum Offload for TCP and UDP 3031 * 3032 * Checksum offloading is also enabled if multiple receive 3033 * queues are to be supported, since we need it to figure out 3034 * the packet type. 3035 */ 3036 if ((ifp->if_capenable & IFCAP_RXCSUM) || 3037 sc->rx_ring_cnt > 1) { 3038 uint32_t rxcsum; 3039 3040 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 3041 3042 /* 3043 * NOTE: 3044 * PCSD must be enabled to enable multiple 3045 * receive queues. 3046 */ 3047 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 3048 E1000_RXCSUM_PCSD; 3049 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 3050 } 3051 3052 /* 3053 * Configure multiple receive queues (RSS) 3054 */ 3055 if (sc->rx_ring_cnt > 1) { 3056 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE]; 3057 int r, j; 3058 3059 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING, 3060 ("invalid number of RX rings (%d)", sc->rx_ring_cnt)); 3061 3062 /* 3063 * NOTE: 3064 * When we reach here, RSS has already been disabled 3065 * in emx_stop(), so we can safely configure the RSS key 3066 * and redirect table. 3067 */ 3068 3069 /* 3070 * Configure RSS key 3071 */ 3072 toeplitz_get_key(key, sizeof(key)); 3073 for (i = 0; i < EMX_NRSSRK; ++i) { 3074 uint32_t rssrk; 3075 3076 rssrk = EMX_RSSRK_VAL(key, i); 3077 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 3078 3079 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk); 3080 } 3081 3082 /* 3083 * Configure RSS redirect table. 3084 */ 3085 if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table, 3086 EMX_RDRTABLE_SIZE); 3087 3088 r = 0; 3089 for (j = 0; j < EMX_NRETA; ++j) { 3090 uint32_t reta = 0; 3091 3092 for (i = 0; i < EMX_RETA_SIZE; ++i) { 3093 uint32_t q; 3094 3095 q = sc->rdr_table[r] << EMX_RETA_RINGIDX_SHIFT; 3096 reta |= q << (8 * i); 3097 ++r; 3098 } 3099 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 3100 E1000_WRITE_REG(&sc->hw, E1000_RETA(j), reta); 3101 } 3102 3103 /* 3104 * Enable multiple receive queues. 3105 * Enable IPv4 RSS standard hash functions. 3106 * Disable RSS interrupt.
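 *
 * Packing sketch for the RETA loop above (the register geometry is
 * inferred from the constants' names, so treat the exact sizes as
 * assumptions): each 32-bit RETA register carries EMX_RETA_SIZE
 * one-byte entries, the ring index being positioned within its byte
 * by EMX_RETA_RINGIDX_SHIFT, hence reta |= q << (8 * i); EMX_NRETA
 * registers then cover the whole redirect table.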
3107 */ 3108 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 3109 E1000_MRQC_ENABLE_RSS_2Q | 3110 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3111 E1000_MRQC_RSS_FIELD_IPV4); 3112 } 3113 3114 /* 3115 * XXX TEMPORARY WORKAROUND: on some systems with 82573 3116 * long latencies are observed, like Lenovo X60. This 3117 * change eliminates the problem, but since having positive 3118 * values in RDTR is a known source of problems on other 3119 * platforms, another solution is being sought. 3120 */ 3121 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 3122 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 3123 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 3124 } 3125 3126 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3127 struct emx_rxdata *rdata = &sc->rx_data[i]; 3128 3129 /* 3130 * Setup the Base and Length of the Rx Descriptor Ring 3131 */ 3132 bus_addr = rdata->rx_desc_paddr; 3133 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 3134 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 3135 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 3136 (uint32_t)(bus_addr >> 32)); 3137 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 3138 (uint32_t)bus_addr); 3139 3140 /* 3141 * Setup the HW Rx Head and Tail Descriptor Pointers 3142 */ 3143 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 3144 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 3145 sc->rx_data[i].num_rx_desc - 1); 3146 } 3147 3148 if (sc->hw.mac.type >= e1000_pch2lan) { 3149 if (ifp->if_mtu > ETHERMTU) 3150 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE); 3151 else 3152 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE); 3153 } 3154 3155 /* Setup the Receive Control Register */ 3156 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3157 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3158 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 3159 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3160 3161 /* Make sure VLAN Filters are off */ 3162 rctl &= ~E1000_RCTL_VFE; 3163 3164 /* Don't store bad packets */ 3165 rctl &= ~E1000_RCTL_SBP; 3166 3167 /* MCLBYTES */ 3168 rctl |= E1000_RCTL_SZ_2048; 3169 3170 if (ifp->if_mtu > ETHERMTU) 3171 rctl |= E1000_RCTL_LPE; 3172 else 3173 rctl &= ~E1000_RCTL_LPE; 3174 3175 /* Enable Receives */ 3176 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 3177 } 3178 3179 static void 3180 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc) 3181 { 3182 struct emx_rxbuf *rx_buffer; 3183 int i; 3184 3185 /* Free Receive Descriptor ring */ 3186 if (rdata->rx_desc) { 3187 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 3188 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 3189 rdata->rx_desc_dmap); 3190 bus_dma_tag_destroy(rdata->rx_desc_dtag); 3191 3192 rdata->rx_desc = NULL; 3193 } 3194 3195 if (rdata->rx_buf == NULL) 3196 return; 3197 3198 for (i = 0; i < ndesc; i++) { 3199 rx_buffer = &rdata->rx_buf[i]; 3200 3201 KKASSERT(rx_buffer->m_head == NULL); 3202 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 3203 } 3204 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 3205 bus_dma_tag_destroy(rdata->rxtag); 3206 3207 kfree(rdata->rx_buf, M_DEVBUF); 3208 rdata->rx_buf = NULL; 3209 } 3210 3211 static void 3212 emx_rxeof(struct emx_rxdata *rdata, int count) 3213 { 3214 struct ifnet *ifp = &rdata->sc->arpcom.ac_if; 3215 uint32_t staterr; 3216 emx_rxdesc_t *current_desc; 3217 struct mbuf *mp; 3218 int i, cpuid = mycpuid; 3219 3220 i = rdata->next_rx_desc_to_check; 3221 current_desc = &rdata->rx_desc[i]; 3222 staterr = le32toh(current_desc->rxd_staterr); 3223 3224 if (!(staterr & E1000_RXD_STAT_DD)) 3225 return; 3226 3227 while ((staterr & E1000_RXD_STAT_DD)
&& count != 0) { 3228 struct pktinfo *pi = NULL, pi0; 3229 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 3230 struct mbuf *m = NULL; 3231 int eop, len; 3232 3233 logif(pkt_receive); 3234 3235 mp = rx_buf->m_head; 3236 3237 /* 3238 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3239 * needs to access the last received byte in the mbuf. 3240 */ 3241 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 3242 BUS_DMASYNC_POSTREAD); 3243 3244 len = le16toh(current_desc->rxd_length); 3245 if (staterr & E1000_RXD_STAT_EOP) { 3246 count--; 3247 eop = 1; 3248 } else { 3249 eop = 0; 3250 } 3251 3252 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3253 uint16_t vlan = 0; 3254 uint32_t mrq, rss_hash; 3255 3256 /* 3257 * Save the necessary information 3258 * before emx_newbuf() destroys it. 3259 */ 3260 if ((staterr & E1000_RXD_STAT_VP) && eop) 3261 vlan = le16toh(current_desc->rxd_vlan); 3262 3263 mrq = le32toh(current_desc->rxd_mrq); 3264 rss_hash = le32toh(current_desc->rxd_rss); 3265 3266 EMX_RSS_DPRINTF(rdata->sc, 10, 3267 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 3268 rdata->idx, mrq, rss_hash); 3269 3270 if (emx_newbuf(rdata, i, 0) != 0) { 3271 IFNET_STAT_INC(ifp, iqdrops, 1); 3272 goto discard; 3273 } 3274 3275 /* Assign correct length to the current fragment */ 3276 mp->m_len = len; 3277 3278 if (rdata->fmp == NULL) { 3279 mp->m_pkthdr.len = len; 3280 rdata->fmp = mp; /* Store the first mbuf */ 3281 rdata->lmp = mp; 3282 } else { 3283 /* 3284 * Chain mbufs together 3285 */ 3286 rdata->lmp->m_next = mp; 3287 rdata->lmp = rdata->lmp->m_next; 3288 rdata->fmp->m_pkthdr.len += len; 3289 } 3290 3291 if (eop) { 3292 rdata->fmp->m_pkthdr.rcvif = ifp; 3293 IFNET_STAT_INC(ifp, ipackets, 1); 3294 3295 if (ifp->if_capenable & IFCAP_RXCSUM) 3296 emx_rxcsum(staterr, rdata->fmp); 3297 3298 if (staterr & E1000_RXD_STAT_VP) { 3299 rdata->fmp->m_pkthdr.ether_vlantag = 3300 vlan; 3301 rdata->fmp->m_flags |= M_VLANTAG; 3302 } 3303 m = rdata->fmp; 3304 rdata->fmp = NULL; 3305 rdata->lmp = NULL; 3306 3307 if (ifp->if_capenable & IFCAP_RSS) { 3308 pi = emx_rssinfo(m, &pi0, mrq, 3309 rss_hash, staterr); 3310 } 3311 #ifdef EMX_RSS_DEBUG 3312 rdata->rx_pkts++; 3313 #endif 3314 } 3315 } else { 3316 IFNET_STAT_INC(ifp, ierrors, 1); 3317 discard: 3318 emx_setup_rxdesc(current_desc, rx_buf); 3319 if (rdata->fmp != NULL) { 3320 m_freem(rdata->fmp); 3321 rdata->fmp = NULL; 3322 rdata->lmp = NULL; 3323 } 3324 m = NULL; 3325 } 3326 3327 if (m != NULL) 3328 ifp->if_input(ifp, m, pi, cpuid); 3329 3330 /* Advance our pointers to the next descriptor. */ 3331 if (++i == rdata->num_rx_desc) 3332 i = 0; 3333 3334 current_desc = &rdata->rx_desc[i]; 3335 staterr = le32toh(current_desc->rxd_staterr); 3336 } 3337 rdata->next_rx_desc_to_check = i; 3338 3339 /* Advance the E1000's Receive Queue "Tail Pointer".
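 * The tail is deliberately left one descriptor behind
 * next_rx_desc_to_check (i is backed up by one below), so the
 * hardware never owns the descriptor software will examine next.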
*/ 3340 if (--i < 0) 3341 i = rdata->num_rx_desc - 1; 3342 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3343 } 3344 3345 static void 3346 emx_enable_intr(struct emx_softc *sc) 3347 { 3348 uint32_t ims_mask = IMS_ENABLE_MASK; 3349 3350 lwkt_serialize_handler_enable(&sc->main_serialize); 3351 3352 #if 0 3353 if (sc->hw.mac.type == e1000_82574) { 3354 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3355 ims_mask |= EM_MSIX_MASK; 3356 } 3357 #endif 3358 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3359 } 3360 3361 static void 3362 emx_disable_intr(struct emx_softc *sc) 3363 { 3364 if (sc->hw.mac.type == e1000_82574) 3365 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3366 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3367 3368 lwkt_serialize_handler_disable(&sc->main_serialize); 3369 } 3370 3371 /* 3372 * Bit of a misnomer, what this really means is 3373 * to enable OS management of the system... aka 3374 * to disable special hardware management features 3375 */ 3376 static void 3377 emx_get_mgmt(struct emx_softc *sc) 3378 { 3379 /* A shared code workaround */ 3380 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3381 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3382 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3383 3384 /* disable hardware interception of ARP */ 3385 manc &= ~(E1000_MANC_ARP_EN); 3386 3387 /* enable receiving management packets to the host */ 3388 manc |= E1000_MANC_EN_MNG2HOST; 3389 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3390 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3391 manc2h |= E1000_MNG2HOST_PORT_623; 3392 manc2h |= E1000_MNG2HOST_PORT_664; 3393 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3394 3395 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3396 } 3397 } 3398 3399 /* 3400 * Give control back to hardware management 3401 * controller if there is one. 3402 */ 3403 static void 3404 emx_rel_mgmt(struct emx_softc *sc) 3405 { 3406 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3407 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3408 3409 /* re-enable hardware interception of ARP */ 3410 manc |= E1000_MANC_ARP_EN; 3411 manc &= ~E1000_MANC_EN_MNG2HOST; 3412 3413 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3414 } 3415 } 3416 3417 /* 3418 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3419 * For ASF and Pass Through versions of f/w this means that 3420 * the driver is loaded. For AMT version (only with 82573) 3421 * of the f/w this means that the network i/f is open. 3422 */ 3423 static void 3424 emx_get_hw_control(struct emx_softc *sc) 3425 { 3426 /* Let firmware know the driver has taken over */ 3427 if (sc->hw.mac.type == e1000_82573) { 3428 uint32_t swsm; 3429 3430 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3431 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3432 swsm | E1000_SWSM_DRV_LOAD); 3433 } else { 3434 uint32_t ctrl_ext; 3435 3436 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3437 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3438 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3439 } 3440 sc->flags |= EMX_FLAG_HW_CTRL; 3441 } 3442 3443 /* 3444 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3445 * For ASF and Pass Through versions of f/w this means that the 3446 * driver is no longer loaded. For AMT version (only with 82573) 3447 * of the f/w this means that the network i/f is closed. 
3448 */ 3449 static void 3450 emx_rel_hw_control(struct emx_softc *sc) 3451 { 3452 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0) 3453 return; 3454 sc->flags &= ~EMX_FLAG_HW_CTRL; 3455 3456 /* Let firmware take over control of h/w */ 3457 if (sc->hw.mac.type == e1000_82573) { 3458 uint32_t swsm; 3459 3460 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3461 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3462 swsm & ~E1000_SWSM_DRV_LOAD); 3463 } else { 3464 uint32_t ctrl_ext; 3465 3466 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3467 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3468 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3469 } 3470 } 3471 3472 static int 3473 emx_is_valid_eaddr(const uint8_t *addr) 3474 { 3475 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3476 3477 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3478 return (FALSE); 3479 3480 return (TRUE); 3481 } 3482 3483 /* 3484 * Enable PCI Wake On LAN capability 3485 */ 3486 static void 3487 emx_enable_wol(device_t dev) 3488 { 3489 uint16_t cap, status; 3490 uint8_t id; 3491 3492 /* First find the capabilities pointer */ 3493 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3494 3495 /* Read the PM Capabilities */ 3496 id = pci_read_config(dev, cap, 1); 3497 if (id != PCIY_PMG) /* Something wrong */ 3498 return; 3499 3500 /* 3501 * OK, we have the power capabilities, 3502 * so now get the status register 3503 */ 3504 cap += PCIR_POWER_STATUS; 3505 status = pci_read_config(dev, cap, 2); 3506 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3507 pci_write_config(dev, cap, status, 2); 3508 } 3509 3510 static void 3511 emx_update_stats(struct emx_softc *sc) 3512 { 3513 struct ifnet *ifp = &sc->arpcom.ac_if; 3514 3515 if (sc->hw.phy.media_type == e1000_media_type_copper || 3516 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3517 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3518 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3519 } 3520 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3521 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3522 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3523 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3524 3525 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3526 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3527 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3528 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3529 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3530 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3531 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3532 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3533 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3534 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3535 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3536 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3537 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3538 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3539 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3540 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3541 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3542 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3543 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3544 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3545 3546 /* For the 64-bit byte counters the low dword must be read first.
*/ 3547 /* Both registers clear on the read of the high dword */ 3548 3549 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3550 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3551 3552 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3553 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3554 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3555 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3556 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3557 3558 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3559 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3560 3561 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3562 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3563 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3564 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3565 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3566 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3567 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3568 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3569 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3570 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3571 3572 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3573 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3574 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3575 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3576 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3577 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3578 3579 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3580 3581 /* Rx Errors */ 3582 IFNET_STAT_SET(ifp, ierrors, 3583 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3584 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3585 3586 /* Tx Errors */ 3587 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3588 } 3589 3590 static void 3591 emx_print_debug_info(struct emx_softc *sc) 3592 { 3593 device_t dev = sc->dev; 3594 uint8_t *hw_addr = sc->hw.hw_addr; 3595 int i; 3596 3597 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3598 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3599 E1000_READ_REG(&sc->hw, E1000_CTRL), 3600 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3601 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3602 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3603 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3604 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3605 sc->hw.fc.high_water, sc->hw.fc.low_water); 3606 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3607 E1000_READ_REG(&sc->hw, E1000_TIDV), 3608 E1000_READ_REG(&sc->hw, E1000_TADV)); 3609 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3610 E1000_READ_REG(&sc->hw, E1000_RDTR), 3611 E1000_READ_REG(&sc->hw, E1000_RADV)); 3612 3613 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3614 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3615 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3616 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3617 } 3618 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3619 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3620 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3621 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3622 } 3623 3624 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3625 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3626 sc->tx_data[i].num_tx_desc_avail); 3627 
device_printf(dev, "TX %d TSO segments = %lu\n", i, 3628 sc->tx_data[i].tso_segments); 3629 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3630 sc->tx_data[i].tso_ctx_reused); 3631 } 3632 } 3633 3634 static void 3635 emx_print_hw_stats(struct emx_softc *sc) 3636 { 3637 device_t dev = sc->dev; 3638 3639 device_printf(dev, "Excessive collisions = %lld\n", 3640 (long long)sc->stats.ecol); 3641 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3642 device_printf(dev, "Symbol errors = %lld\n", 3643 (long long)sc->stats.symerrs); 3644 #endif 3645 device_printf(dev, "Sequence errors = %lld\n", 3646 (long long)sc->stats.sec); 3647 device_printf(dev, "Defer count = %lld\n", 3648 (long long)sc->stats.dc); 3649 device_printf(dev, "Missed Packets = %lld\n", 3650 (long long)sc->stats.mpc); 3651 device_printf(dev, "Receive No Buffers = %lld\n", 3652 (long long)sc->stats.rnbc); 3653 /* RLEC is inaccurate on some hardware, calculate our own. */ 3654 device_printf(dev, "Receive Length Errors = %lld\n", 3655 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3656 device_printf(dev, "Receive errors = %lld\n", 3657 (long long)sc->stats.rxerrc); 3658 device_printf(dev, "Crc errors = %lld\n", 3659 (long long)sc->stats.crcerrs); 3660 device_printf(dev, "Alignment errors = %lld\n", 3661 (long long)sc->stats.algnerrc); 3662 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3663 (long long)sc->stats.cexterr); 3664 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3665 device_printf(dev, "XON Rcvd = %lld\n", 3666 (long long)sc->stats.xonrxc); 3667 device_printf(dev, "XON Xmtd = %lld\n", 3668 (long long)sc->stats.xontxc); 3669 device_printf(dev, "XOFF Rcvd = %lld\n", 3670 (long long)sc->stats.xoffrxc); 3671 device_printf(dev, "XOFF Xmtd = %lld\n", 3672 (long long)sc->stats.xofftxc); 3673 device_printf(dev, "Good Packets Rcvd = %lld\n", 3674 (long long)sc->stats.gprc); 3675 device_printf(dev, "Good Packets Xmtd = %lld\n", 3676 (long long)sc->stats.gptc); 3677 } 3678 3679 static void 3680 emx_print_nvm_info(struct emx_softc *sc) 3681 { 3682 uint16_t eeprom_data; 3683 int i, j, row = 0; 3684 3685 /* Its a bit crude, but it gets the job done */ 3686 kprintf("\nInterface EEPROM Dump:\n"); 3687 kprintf("Offset\n0x0000 "); 3688 for (i = 0, j = 0; i < 32; i++, j++) { 3689 if (j == 8) { /* Make the offset block */ 3690 j = 0; ++row; 3691 kprintf("\n0x00%x0 ",row); 3692 } 3693 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3694 kprintf("%04x ", eeprom_data); 3695 } 3696 kprintf("\n"); 3697 } 3698 3699 static int 3700 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3701 { 3702 struct emx_softc *sc; 3703 struct ifnet *ifp; 3704 int error, result; 3705 3706 result = -1; 3707 error = sysctl_handle_int(oidp, &result, 0, req); 3708 if (error || !req->newptr) 3709 return (error); 3710 3711 sc = (struct emx_softc *)arg1; 3712 ifp = &sc->arpcom.ac_if; 3713 3714 ifnet_serialize_all(ifp); 3715 3716 if (result == 1) 3717 emx_print_debug_info(sc); 3718 3719 /* 3720 * This value will cause a hex dump of the 3721 * first 32 16-bit words of the EEPROM to 3722 * the screen. 
3723 */ 3724 if (result == 2) 3725 emx_print_nvm_info(sc); 3726 3727 ifnet_deserialize_all(ifp); 3728 3729 return (error); 3730 } 3731 3732 static int 3733 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3734 { 3735 int error, result; 3736 3737 result = -1; 3738 error = sysctl_handle_int(oidp, &result, 0, req); 3739 if (error || !req->newptr) 3740 return (error); 3741 3742 if (result == 1) { 3743 struct emx_softc *sc = (struct emx_softc *)arg1; 3744 struct ifnet *ifp = &sc->arpcom.ac_if; 3745 3746 ifnet_serialize_all(ifp); 3747 emx_print_hw_stats(sc); 3748 ifnet_deserialize_all(ifp); 3749 } 3750 return (error); 3751 } 3752 3753 static void 3754 emx_add_sysctl(struct emx_softc *sc) 3755 { 3756 struct sysctl_ctx_list *ctx; 3757 struct sysctl_oid *tree; 3758 char pkt_desc[32]; 3759 int i; 3760 3761 ctx = device_get_sysctl_ctx(sc->dev); 3762 tree = device_get_sysctl_tree(sc->dev); 3763 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3764 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3765 emx_sysctl_debug_info, "I", "Debug Information"); 3766 3767 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3768 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3769 emx_sysctl_stats, "I", "Statistics"); 3770 3771 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3772 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0, 3773 "# of RX descs"); 3774 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3775 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0, 3776 "# of TX descs"); 3777 3778 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3779 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3780 emx_sysctl_int_throttle, "I", "interrupt throttling rate"); 3781 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3782 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3783 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt"); 3784 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3785 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3786 emx_sysctl_tx_wreg_nsegs, "I", 3787 "# segments sent before write to hardware register"); 3788 3789 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3790 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0, 3791 "# of RX rings"); 3792 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3793 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0, 3794 "# of TX rings"); 3795 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3796 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0, 3797 "# of TX rings used"); 3798 3799 #ifdef IFPOLL_ENABLE 3800 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3801 OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 3802 sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 3803 "TX polling CPU map"); 3804 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3805 OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD, 3806 sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 3807 "RX polling CPU map"); 3808 #endif 3809 3810 #ifdef EMX_RSS_DEBUG 3811 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3812 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3813 0, "RSS debug level"); 3814 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3815 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i); 3816 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3817 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts, 3818 "RXed packets"); 3819 } 3820 #endif 3821 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3822 #ifdef EMX_TSS_DEBUG 3823 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i); 3824 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3825 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts, 3826 
"TXed packets"); 3827 #endif 3828 3829 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_nmbuf", i); 3830 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3831 pkt_desc, CTLFLAG_RD, &sc->tx_data[i].tx_nmbuf, 0, 3832 "# of pending TX mbufs"); 3833 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_gc", i); 3834 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3835 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_gc, 3836 "# of TX desc GC"); 3837 } 3838 } 3839 3840 static int 3841 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3842 { 3843 struct emx_softc *sc = (void *)arg1; 3844 struct ifnet *ifp = &sc->arpcom.ac_if; 3845 int error, throttle; 3846 3847 throttle = sc->int_throttle_ceil; 3848 error = sysctl_handle_int(oidp, &throttle, 0, req); 3849 if (error || req->newptr == NULL) 3850 return error; 3851 if (throttle < 0 || throttle > 1000000000 / 256) 3852 return EINVAL; 3853 3854 if (throttle) { 3855 /* 3856 * Set the interrupt throttling rate in 256ns increments, 3857 * recalculate sysctl value assignment to get exact frequency. 3858 */ 3859 throttle = 1000000000 / 256 / throttle; 3860 3861 /* Upper 16bits of ITR is reserved and should be zero */ 3862 if (throttle & 0xffff0000) 3863 return EINVAL; 3864 } 3865 3866 ifnet_serialize_all(ifp); 3867 3868 if (throttle) 3869 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3870 else 3871 sc->int_throttle_ceil = 0; 3872 3873 if (ifp->if_flags & IFF_RUNNING) 3874 emx_set_itr(sc, throttle); 3875 3876 ifnet_deserialize_all(ifp); 3877 3878 if (bootverbose) { 3879 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3880 sc->int_throttle_ceil); 3881 } 3882 return 0; 3883 } 3884 3885 static int 3886 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3887 { 3888 struct emx_softc *sc = (void *)arg1; 3889 struct ifnet *ifp = &sc->arpcom.ac_if; 3890 struct emx_txdata *tdata = &sc->tx_data[0]; 3891 int error, segs; 3892 3893 segs = tdata->tx_intr_nsegs; 3894 error = sysctl_handle_int(oidp, &segs, 0, req); 3895 if (error || req->newptr == NULL) 3896 return error; 3897 if (segs <= 0) 3898 return EINVAL; 3899 3900 ifnet_serialize_all(ifp); 3901 3902 /* 3903 * Don't allow tx_intr_nsegs to become: 3904 * o Less the oact_tx_desc 3905 * o Too large that no TX desc will cause TX interrupt to 3906 * be generated (OACTIVE will never recover) 3907 * o Too small that will cause tx_dd[] overflow 3908 */ 3909 if (segs < tdata->oact_tx_desc || 3910 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 3911 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 3912 error = EINVAL; 3913 } else { 3914 int i; 3915 3916 error = 0; 3917 for (i = 0; i < sc->tx_ring_cnt; ++i) 3918 sc->tx_data[i].tx_intr_nsegs = segs; 3919 } 3920 3921 ifnet_deserialize_all(ifp); 3922 3923 return error; 3924 } 3925 3926 static int 3927 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3928 { 3929 struct emx_softc *sc = (void *)arg1; 3930 struct ifnet *ifp = &sc->arpcom.ac_if; 3931 int error, nsegs, i; 3932 3933 nsegs = sc->tx_data[0].tx_wreg_nsegs; 3934 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3935 if (error || req->newptr == NULL) 3936 return error; 3937 3938 ifnet_serialize_all(ifp); 3939 for (i = 0; i < sc->tx_ring_cnt; ++i) 3940 sc->tx_data[i].tx_wreg_nsegs =nsegs; 3941 ifnet_deserialize_all(ifp); 3942 3943 return 0; 3944 } 3945 3946 static int 3947 emx_dma_alloc(struct emx_softc *sc) 3948 { 3949 int error, i; 3950 3951 /* 3952 * Create top level busdma tag 3953 */ 3954 error = bus_dma_tag_create(NULL, 1, 0, 3955 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3956 NULL, NULL, 3957 BUS_SPACE_MAXSIZE_32BIT, 0, 
static int
emx_dma_alloc(struct emx_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_dtag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate transmit descriptor rings and buffers
	 */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		error = emx_create_tx_ring(&sc->tx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup transmit structures\n");
			return error;
		}
	}

	/*
	 * Allocate receive descriptor rings and buffers
	 */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		error = emx_create_rx_ring(&sc->rx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup receive structures\n");
			return error;
		}
	}
	return 0;
}

static void
emx_dma_free(struct emx_softc *sc)
{
	int i;

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		emx_destroy_tx_ring(&sc->tx_data[i],
		    sc->tx_data[i].num_tx_desc);
	}

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		emx_destroy_rx_ring(&sc->rx_data[i],
		    sc->rx_data[i].num_rx_desc);
	}

	/* Free top level busdma tag */
	if (sc->parent_dtag != NULL)
		bus_dma_tag_destroy(sc->parent_dtag);
}

static void
emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
}

static int
emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_serialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
}

static void
emx_deserialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
}

#ifdef INVARIANTS

static void
emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
	    slz, serialized);
}

#endif	/* INVARIANTS */

#ifdef IFPOLL_ENABLE

static void
emx_npoll_status(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);
		callout_reset(&sc->timer, hz, emx_timer, sc);
	}
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct emx_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->tx_serialize);

	emx_tx_intr(tdata);
	emx_try_txgc(tdata, 1);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct emx_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->rx_serialize);

	emx_rxeof(rdata, cycle);
}

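/*
 * ifpoll registration: export one status handler plus a TX and an RX
 * handler per active ring, each bound to the CPU chosen by the ring
 * map.  When info is NULL, polling is being torn down and the TX
 * queues are re-bound to the interrupt CPU.
 */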
static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct emx_softc *sc = ifp->if_softc;
	int i, txr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int cpu;

		info->ifpi_status.status_func = emx_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = emx_get_txring_inuse(sc, TRUE);
		for (i = 0; i < txr_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_tx[cpu].poll_func = emx_npoll_tx;
			info->ifpi_tx[cpu].arg = tdata;
			info->ifpi_tx[cpu].serializer = &tdata->tx_serialize;
			ifsq_set_cpuid(tdata->ifsq, cpu);
		}

		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct emx_rxdata *rdata = &sc->rx_data[i];

			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_rx[cpu].poll_func = emx_npoll_rx;
			info->ifpi_rx[cpu].arg = rdata;
			info->ifpi_rx[cpu].serializer = &rdata->rx_serialize;
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			ifsq_set_cpuid(tdata->ifsq,
			    rman_get_cpuid(sc->intr_res));
		}
	}
	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);
}

#endif	/* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
	E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
	if (sc->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When using MSI-X interrupts we need to throttle
		 * using the EITR register
		 */
		for (i = 0; i < 4; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
	}
}

/*
 * Disable ASPM L0s, 82574L errata #20
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = sc->dev;

	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8 disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
		/*
		 * 82574 specification update errata #20
		 *
		 * There is no need to disable L1
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}

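/*
 * TSO setup reads the whole Ethernet/IP/TCP header chain in one place,
 * so the headers must be contiguous in the first mbuf; some chips
 * additionally want 4 bytes beyond the headers pulled in as well
 * (EMX_TXFLAG_TSO_PULLEX).  The pullup below also zeroes ip_len, since
 * for TSO the hardware fills in the per-segment IP length itself.
 */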
tcp hlen")); 4237 KASSERT(hoff > 0, ("invalid ether hlen")); 4238 4239 if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX) 4240 ex = 4; 4241 4242 if (m->m_len < hoff + iphlen + thoff + ex) { 4243 m = m_pullup(m, hoff + iphlen + thoff + ex); 4244 if (m == NULL) { 4245 *mp = NULL; 4246 return ENOBUFS; 4247 } 4248 *mp = m; 4249 } 4250 ip = mtodoff(m, struct ip *, hoff); 4251 ip->ip_len = 0; 4252 4253 return 0; 4254 } 4255 4256 static int 4257 emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp, 4258 uint32_t *txd_upper, uint32_t *txd_lower) 4259 { 4260 struct e1000_context_desc *TXD; 4261 int hoff, iphlen, thoff, hlen; 4262 int mss, pktlen, curr_txd; 4263 4264 #ifdef EMX_TSO_DEBUG 4265 tdata->tso_segments++; 4266 #endif 4267 4268 iphlen = mp->m_pkthdr.csum_iphlen; 4269 thoff = mp->m_pkthdr.csum_thlen; 4270 hoff = mp->m_pkthdr.csum_lhlen; 4271 mss = mp->m_pkthdr.tso_segsz; 4272 pktlen = mp->m_pkthdr.len; 4273 4274 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 4275 tdata->csum_flags == CSUM_TSO && 4276 tdata->csum_iphlen == iphlen && 4277 tdata->csum_lhlen == hoff && 4278 tdata->csum_thlen == thoff && 4279 tdata->csum_mss == mss && 4280 tdata->csum_pktlen == pktlen) { 4281 *txd_upper = tdata->csum_txd_upper; 4282 *txd_lower = tdata->csum_txd_lower; 4283 #ifdef EMX_TSO_DEBUG 4284 tdata->tso_ctx_reused++; 4285 #endif 4286 return 0; 4287 } 4288 hlen = hoff + iphlen + thoff; 4289 4290 /* 4291 * Setup a new TSO context. 4292 */ 4293 4294 curr_txd = tdata->next_avail_tx_desc; 4295 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 4296 4297 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 4298 E1000_TXD_DTYP_D | /* Data descr type */ 4299 E1000_TXD_CMD_TSE; /* Do TSE on this packet */ 4300 4301 /* IP and/or TCP header checksum calculation and insertion. */ 4302 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 4303 4304 /* 4305 * Start offset for header checksum calculation. 4306 * End offset for header checksum calculation. 4307 * Offset of place put the checksum. 4308 */ 4309 TXD->lower_setup.ip_fields.ipcss = hoff; 4310 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1); 4311 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum); 4312 4313 /* 4314 * Start offset for payload checksum calculation. 4315 * End offset for payload checksum calculation. 4316 * Offset of place to put the checksum. 4317 */ 4318 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen; 4319 TXD->upper_setup.tcp_fields.tucse = 0; 4320 TXD->upper_setup.tcp_fields.tucso = 4321 hoff + iphlen + offsetof(struct tcphdr, th_sum); 4322 4323 /* 4324 * Payload size per packet w/o any headers. 4325 * Length of all headers up to payload. 
	TXD->tcp_seg_setup.fields.mss = htole16(mss);
	TXD->tcp_seg_setup.fields.hdr_len = hlen;
	TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
	    E1000_TXD_CMD_DEXT |	/* Extended descr */
	    E1000_TXD_CMD_TSE |		/* TSE context */
	    E1000_TXD_CMD_IP |		/* Do IP csum */
	    E1000_TXD_CMD_TCP |		/* Do TCP checksum */
	    (pktlen - hlen));		/* Total len */

	/* Save the information for this TSO context */
	tdata->csum_flags = CSUM_TSO;
	tdata->csum_lhlen = hoff;
	tdata->csum_iphlen = iphlen;
	tdata->csum_thlen = thoff;
	tdata->csum_mss = mss;
	tdata->csum_pktlen = pktlen;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}

static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
	if (polling)
		return sc->tx_ring_cnt;
	else
		return 1;
}
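/*
 * Multiple TX rings are only put to use in polling mode; in interrupt
 * mode everything is funneled through ring 0, matching the single
 * sc->intr_res that the TX queues are bound to in emx_npoll() above.
 */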