/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),
	EMX_DEVICE(PCH_I218_LM2),
	EMX_DEVICE(PCH_I218_V2),
	EMX_DEVICE(PCH_I218_LM3),
	EMX_DEVICE(PCH_I218_V3),
	EMX_DEVICE(PCH_SPT_I219_LM),
	EMX_DEVICE(PCH_SPT_I219_V),
	EMX_DEVICE(PCH_SPT_I219_LM2),
	EMX_DEVICE(PCH_SPT_I219_V2),
	EMX_DEVICE(PCH_LBG_I219_LM3),
	EMX_DEVICE(PCH_SPT_I219_LM4),
	EMX_DEVICE(PCH_SPT_I219_V4),
	EMX_DEVICE(PCH_SPT_I219_LM5),
	EMX_DEVICE(PCH_SPT_I219_V5),
	EMX_DEVICE(PCH_CNP_I219_LM6),
	EMX_DEVICE(PCH_CNP_I219_V6),
	EMX_DEVICE(PCH_CNP_I219_LM7),
	EMX_DEVICE(PCH_CNP_I219_V7),

	/* required last entry */
	EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *, boolean_t);
static void	emx_txgc_timer(void *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);
static void	emx_flush_tx_ring(struct emx_softc *);
static void	emx_flush_rx_ring(struct emx_softc *);
static void	emx_flush_txrx_ring(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

static char	emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}
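
/*
 * Note (added commentary): the hardware sets the DD (descriptor done)
 * bit in rxd_staterr once it has DMA'd a packet into the buffer, and
 * the RX cleanup path keys off that bit; recycling a descriptor with a
 * stale DD bit set would make an old completion look like a freshly
 * received packet.
 */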

static __inline void
emx_free_txbuf(struct emx_txdata *tdata, struct emx_txbuf *tx_buffer)
{

	KKASSERT(tx_buffer->m_head != NULL);
	KKASSERT(tdata->tx_nmbuf > 0);
	tdata->tx_nmbuf--;

	bus_dmamap_unload(tdata->txtag, tx_buffer->map);
	m_freem(tx_buffer->m_head);
	tx_buffer->m_head = NULL;
}

static __inline void
emx_tx_intr(struct emx_txdata *tdata)
{

	emx_txeof(tdata);
	if (!ifsq_is_empty(tdata->ifsq))
		ifsq_devstart(tdata->ifsq);
}

static __inline void
emx_try_txgc(struct emx_txdata *tdata, int16_t dec)
{

	if (tdata->tx_running > 0) {
		tdata->tx_running -= dec;
		if (tdata->tx_running <= 0 && tdata->tx_nmbuf &&
		    tdata->num_tx_desc_avail < tdata->num_tx_desc &&
		    tdata->num_tx_desc_avail + tdata->tx_intr_nsegs >
		    tdata->num_tx_desc)
			emx_tx_collect(tdata, TRUE);
	}
}

static void
emx_txgc_timer(void *xtdata)
{
	struct emx_txdata *tdata = xtdata;
	struct ifnet *ifp = &tdata->sc->arpcom.ac_if;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP))
		return;

	if (!lwkt_serialize_try(&tdata->tx_serialize))
		goto done;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP)) {
		lwkt_serialize_exit(&tdata->tx_serialize);
		return;
	}
	emx_try_txgc(tdata, EMX_TX_RUNNING_DEC);

	lwkt_serialize_exit(&tdata->tx_serialize);
done:
	callout_reset(&tdata->tx_gc_timer, 1, emx_txgc_timer, tdata);
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable;
	int tx_ring_max, ring_cnt;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
	char flowctrl[IFM_ETH_FC_STRLEN];

	/*
	 * Setup RX rings
	 */
	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	/*
	 * Setup TX rings
	 */
	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
		callout_init_mp(&sc->tx_data[i].tx_gc_timer);
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;
again:
	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: %s\n",
		    sc->intr_type == PCI_INTR_TYPE_MSI ?
		    "MSI" : "legacy intr");
		if (!msi_enable) {
			/* Retry with MSI. */
			msi_enable = 1;
			sc->flags &= ~EMX_FLAG_SHARED_INTR;
			goto again;
		}
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	} else if (sc->hw.mac.type >= e1000_pch_spt) {
		/*
		 * In the new SPT device flash is not a separate BAR,
		 * rather it is also in BAR0, so use the same tag and
		 * an offset handle for the FLASH read/write macros
		 * in the shared code.
		 */
		sc->osdep.flash_bus_space_tag = sc->osdep.mem_bus_space_tag;
		sc->osdep.flash_bus_space_handle =
		    sc->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
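
	/*
	 * Worked example (added commentary): the ITR register counts in
	 * 256ns units, so a ceiling of, say, 10000 interrupts/sec maps to
	 * 1000000000 / 256 / 10000 = 390 ticks, i.e. at most one interrupt
	 * per 390 * 256ns ~= 100us; converting back, 1000000000 / 256 / 390
	 * = 10016 is the exact ceiling the hardware then honors.
	 */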

	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/*
	 * Calculate # of RX/TX rings
	 */
	ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, EMX_NRX_RING);

	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan ||
	    sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_pch_spt ||
	    sc->hw.mac.type == e1000_pch_cnp ||
	    sc->hw.mac.type == e1000_82574)
		tx_ring_max = EMX_NTX_RING;
	ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, tx_ring_max);

	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);
	sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		apme_mask = E1000_WUC_APME;
		sc->flags |= EMX_FLAG_HAS_AMT;
		eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC);
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port sc, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

	/* Initialize the # of TX rings to use. */
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    emx_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree; must come after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset; call it again,
		 * if it fails a second time it's a real issue.
		 */
		error = emx_reset(sc);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(&sc->arpcom.ac_if);
			goto fail;
		}
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}
	return (0);
fail:
	emx_detach(dev);
	return (error);
}
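
/*
 * NOTE (added commentary): emx_detach() also serves as the error path
 * of emx_attach() (the `fail' label above), so it must cope with a
 * partially initialized softc; hence the NULL check on each resource
 * before it is released below.
 */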

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
		    sc->flash);
	}

	emx_dma_free(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	if (sc->rx_rmap != NULL)
		if_ringmap_free(sc->rx_rmap);
	if (sc->tx_rmap != NULL)
		if_ringmap_free(sc->tx_rmap);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}
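
/*
 * NOTE (added commentary): this is the if_start handler for a single
 * TX subqueue; it runs with that ring's tx_serialize held (asserted
 * below).  TDT doorbell writes are batched: the register is written
 * once tx_wreg_nsegs segments have accumulated, or once at loop exit.
 */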

static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Do we at least have the minimal number of descriptors? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata, FALSE);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata, FALSE);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time would make the opackets
		 * statistics vastly inaccurate; we do the opackets increment
		 * now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
	tdata->tx_running = EMX_TX_RUNNING;
}

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_pch_cnp:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
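
/*
 * Illustrative MTU arithmetic (added commentary) for the SIOCSIFMTU
 * case above: with a 9234-byte max frame the largest MTU accepted is
 * 9234 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 9216 bytes.
 */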

static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */

	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		tdata->tx_watchdog.wd_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571 sc, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	/* Reset hardware counters */
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling)
		emx_disable_intr(sc);
	else
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_clr_oactive(tdata->ifsq);
		ifsq_watchdog_start(&tdata->tx_watchdog);
		if (!polling) {
			callout_reset_bycpu(&tdata->tx_gc_timer, 1,
			    emx_txgc_timer, tdata, ifsq_get_cpuid(tdata->ifsq));
		}
	}
	callout_reset(&sc->timer, hz, emx_timer, sc);
}
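
/*
 * Decoding the 82574 IVAR value 0x800A0908 written in emx_init() above
 * (added commentary, following the nibble scheme described there):
 * nibble 0 = 0x8 routes RXQ0 to MSI-X vector 0 with the enable bit
 * set, nibble 2 = 0x9 routes TXQ0 to vector 1, and nibble 4 = 0xA
 * routes the link/other interrupt to vector 2.
 */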

static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle. The ICR register
	 * reports all-ones value in this case. Processing such
	 * interrupts would lead to a freeze. I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(&sc->rx_data[i], -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			struct emx_txdata *tdata = &sc->tx_data[0];

			lwkt_serialize_enter(&tdata->tx_serialize);
			emx_tx_intr(tdata);
			lwkt_serialize_exit(&tdata->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	if (ifmr->ifm_active & IFM_FDX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}

static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if (bootverbose) {
					if_printf(ifp, "Flow control is not "
					    "allowed for half-duplex\n");
				}
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if (bootverbose) {
			if_printf(ifp, "Unsupported media type %d\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		}
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);

	return (0);
}

static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = emx_tso_pullup(tdata, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index, this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = tdata->next_avail_tx_desc;
	tx_buffer = &tdata->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	tdata->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

	i = tdata->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &tdata->tx_buf[i];
		ctxd = &tdata->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == tdata->num_tx_desc)
			i = 0;
	}

	tdata->next_avail_tx_desc = i;

	KKASSERT(tdata->num_tx_desc_avail > nsegs);
	tdata->num_tx_desc_avail -= nsegs;
	tdata->tx_nmbuf++;

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
		tdata->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_intr_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		tdata->tx_dd[tdata->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
		KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Defer TDT updating, until enough descriptors are setup
	 */
	*idx = i;

#ifdef EMX_TSS_DEBUG
	tdata->tx_pkts++;
#endif

	return (0);
}
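
/*
 * NOTE (added commentary): emx_encap() requests a DONE writeback (RS)
 * only every tx_intr_nsegs descriptors, and emx_start() writes the
 * TDT doorbell only every tx_wreg_nsegs segments; both batch up MMIO
 * and interrupt work instead of paying for it on every packet.
 */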

static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;
	int mcnt = 0;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
	reg_rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);

	if (ifp->if_flags & IFF_ALLMULTI) {
		mcnt = EMX_MCAST_ADDR_MAX;
	} else {
		const struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == EMX_MCAST_ADDR_MAX)
				break;
			mcnt++;
		}
	}
	/* Don't disable if in MAX groups */
	if (mcnt < EMX_MCAST_ADDR_MAX)
		reg_rctl &= ~E1000_RCTL_MPE;

	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(&sc->main_serialize);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			if (hw->mac.type >= e1000_pch_spt)
				msec_delay(50);
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) &
		    E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571EB/82572EI
		 */
		if (sc->link_speed != SPEED_1000 &&
		    (hw->mac.type == e1000_82571 ||
		     hw->mac.type == e1000_82572)) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));
			device_printf(dev, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    (sc->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex",
			    flowctrl);
		}
1975 "Full Duplex" : "Half Duplex", 1976 flowctrl); 1977 } 1978 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1979 e1000_force_flowctrl(hw, sc->ifm_flowctrl); 1980 sc->link_active = 1; 1981 sc->smartspeed = 0; 1982 ifp->if_baudrate = sc->link_speed * 1000000; 1983 ifp->if_link_state = LINK_STATE_UP; 1984 if_link_state_change(ifp); 1985 } else if (!link_check && sc->link_active == 1) { 1986 ifp->if_baudrate = sc->link_speed = 0; 1987 sc->link_duplex = 0; 1988 if (bootverbose) 1989 device_printf(dev, "Link is Down\n"); 1990 sc->link_active = 0; 1991 ifp->if_link_state = LINK_STATE_DOWN; 1992 if_link_state_change(ifp); 1993 } 1994 } 1995 1996 static void 1997 emx_stop(struct emx_softc *sc) 1998 { 1999 struct ifnet *ifp = &sc->arpcom.ac_if; 2000 int i; 2001 2002 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2003 2004 emx_disable_intr(sc); 2005 2006 callout_stop(&sc->timer); 2007 2008 ifp->if_flags &= ~IFF_RUNNING; 2009 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2010 struct emx_txdata *tdata = &sc->tx_data[i]; 2011 2012 ifsq_clr_oactive(tdata->ifsq); 2013 ifsq_watchdog_stop(&tdata->tx_watchdog); 2014 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED; 2015 2016 tdata->tx_running = 0; 2017 callout_stop(&tdata->tx_gc_timer); 2018 } 2019 2020 /* I219 needs some special flushing to avoid hangs */ 2021 if (sc->hw.mac.type >= e1000_pch_spt) 2022 emx_flush_txrx_ring(sc); 2023 2024 /* 2025 * Disable multiple receive queues. 2026 * 2027 * NOTE: 2028 * We should disable multiple receive queues before 2029 * resetting the hardware. 2030 */ 2031 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0); 2032 2033 e1000_reset_hw(&sc->hw); 2034 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 2035 2036 for (i = 0; i < sc->tx_ring_cnt; ++i) 2037 emx_free_tx_ring(&sc->tx_data[i]); 2038 for (i = 0; i < sc->rx_ring_cnt; ++i) 2039 emx_free_rx_ring(&sc->rx_data[i]); 2040 } 2041 2042 static int 2043 emx_reset(struct emx_softc *sc) 2044 { 2045 device_t dev = sc->dev; 2046 uint16_t rx_buffer_size; 2047 uint32_t pba; 2048 2049 /* Set up smart power down as default off on newer adapters. */ 2050 if (!emx_smart_pwr_down && 2051 (sc->hw.mac.type == e1000_82571 || 2052 sc->hw.mac.type == e1000_82572)) { 2053 uint16_t phy_tmp = 0; 2054 2055 /* Speed up time to link by disabling smart power down. */ 2056 e1000_read_phy_reg(&sc->hw, 2057 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 2058 phy_tmp &= ~IGP02E1000_PM_SPD; 2059 e1000_write_phy_reg(&sc->hw, 2060 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 2061 } 2062 2063 /* 2064 * Packet Buffer Allocation (PBA) 2065 * Writing PBA sets the receive portion of the buffer 2066 * the remainder is used for the transmit buffer. 2067 */ 2068 switch (sc->hw.mac.type) { 2069 /* Total Packet Buffer on these is 48K */ 2070 case e1000_82571: 2071 case e1000_82572: 2072 case e1000_80003es2lan: 2073 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 2074 break; 2075 2076 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 2077 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 2078 break; 2079 2080 case e1000_82574: 2081 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 2082 break; 2083 2084 case e1000_pch_lpt: 2085 case e1000_pch_spt: 2086 case e1000_pch_cnp: 2087 pba = E1000_PBA_26K; 2088 break; 2089 2090 default: 2091 /* Devices before 82547 had a Packet Buffer of 64K. 
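The value written to E1000_PBA selects the Rx share of that buffer in KB and the remainder is left for Tx; e.g. E1000_PBA_40K below splits the 64K as 40K Rx + 24K Tx.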
*/
2092 if (sc->hw.mac.max_frame_size > 8192)
2093 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
2094 else
2095 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
2096 }
2097 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);
2098 
2099 /*
2100 * These parameters control the automatic generation (Tx) and
2101 * response (Rx) to Ethernet PAUSE frames.
2102 * - High water mark should allow for at least two frames to be
2103 * received after sending an XOFF.
2104 * - Low water mark works best when it is very near the high water mark.
2105 * This allows the receiver to restart by sending XON when it has
2106 * drained a bit. Here we use an arbitrary value of 1500 which will
2107 * restart after one full frame is pulled from the buffer. There
2108 * could be several smaller frames in the buffer and if so they will
2109 * not trigger the XON until their total number reduces the buffer
2110 * by 1500.
2111 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2112 */
2113 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;
2114 
2115 sc->hw.fc.high_water = rx_buffer_size -
2116 roundup2(sc->hw.mac.max_frame_size, 1024);
2117 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;
2118 
2119 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
2120 sc->hw.fc.send_xon = TRUE;
2121 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);
2122 
2123 /*
2124 * Device specific overrides/settings
2125 */
2126 if (sc->hw.mac.type == e1000_pch_lpt ||
2127 sc->hw.mac.type == e1000_pch_spt ||
2128 sc->hw.mac.type == e1000_pch_cnp) {
2129 sc->hw.fc.high_water = 0x5C20;
2130 sc->hw.fc.low_water = 0x5048;
2131 sc->hw.fc.pause_time = 0x0650;
2132 sc->hw.fc.refresh_time = 0x0400;
2133 /* Jumbos need adjusted PBA */
2134 if (sc->arpcom.ac_if.if_mtu > ETHERMTU)
2135 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12);
2136 else
2137 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26);
2138 } else if (sc->hw.mac.type == e1000_80003es2lan) {
2139 sc->hw.fc.pause_time = 0xFFFF;
2140 }
2141 
2142 /* I219 needs some special flushing to avoid hangs */
2143 if (sc->hw.mac.type >= e1000_pch_spt)
2144 emx_flush_txrx_ring(sc);
2145 
2146 /* Issue a global reset */
2147 e1000_reset_hw(&sc->hw);
2148 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
2149 emx_disable_aspm(sc);
2150 
2151 if (e1000_init_hw(&sc->hw) < 0) {
2152 device_printf(dev, "Hardware Initialization Failed\n");
2153 return (EIO);
2154 }
2155 
2156 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
2157 e1000_get_phy_info(&sc->hw);
2158 e1000_check_for_link(&sc->hw);
2159 
2160 return (0);
2161 }
2162 
2163 static void
2164 emx_setup_ifp(struct emx_softc *sc)
2165 {
2166 struct ifnet *ifp = &sc->arpcom.ac_if;
2167 int i;
2168 
2169 if_initname(ifp, device_get_name(sc->dev),
2170 device_get_unit(sc->dev));
2171 ifp->if_softc = sc;
2172 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2173 ifp->if_init = emx_init;
2174 ifp->if_ioctl = emx_ioctl;
2175 ifp->if_start = emx_start;
2176 #ifdef IFPOLL_ENABLE
2177 ifp->if_npoll = emx_npoll;
2178 #endif
2179 ifp->if_serialize = emx_serialize;
2180 ifp->if_deserialize = emx_deserialize;
2181 ifp->if_tryserialize = emx_tryserialize;
2182 #ifdef INVARIANTS
2183 ifp->if_serialize_assert = emx_serialize_assert;
2184 #endif
2185 
2186 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc;
2187 
2188 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1);
2189 ifq_set_ready(&ifp->if_snd);
2190 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);
2191 
2192 ifp->if_mapsubq = ifq_mapsubq_modulo;
2193 ifq_set_subq_divisor(&ifp->if_snd,
1); 2194 2195 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2196 2197 ifp->if_capabilities = IFCAP_HWCSUM | 2198 IFCAP_VLAN_HWTAGGING | 2199 IFCAP_VLAN_MTU | 2200 IFCAP_TSO; 2201 if (sc->rx_ring_cnt > 1) 2202 ifp->if_capabilities |= IFCAP_RSS; 2203 ifp->if_capenable = ifp->if_capabilities; 2204 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2205 2206 /* 2207 * Tell the upper layer(s) we support long frames. 2208 */ 2209 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2210 2211 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2212 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2213 struct emx_txdata *tdata = &sc->tx_data[i]; 2214 2215 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2216 ifsq_set_priv(ifsq, tdata); 2217 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2218 tdata->ifsq = ifsq; 2219 2220 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2221 } 2222 2223 /* 2224 * Specify the media types supported by this sc and register 2225 * callbacks to update media and link information 2226 */ 2227 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2228 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2229 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2230 0, NULL); 2231 } else { 2232 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2233 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2234 0, NULL); 2235 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2236 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2237 0, NULL); 2238 if (sc->hw.phy.type != e1000_phy_ife) { 2239 ifmedia_add(&sc->media, 2240 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2241 } 2242 } 2243 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2244 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl); 2245 } 2246 2247 /* 2248 * Workaround for SmartSpeed on 82541 and 82547 controllers 2249 */ 2250 static void 2251 emx_smartspeed(struct emx_softc *sc) 2252 { 2253 uint16_t phy_tmp; 2254 2255 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2256 sc->hw.mac.autoneg == 0 || 2257 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2258 return; 2259 2260 if (sc->smartspeed == 0) { 2261 /* 2262 * If Master/Slave config fault is asserted twice, 2263 * we assume back-to-back 2264 */ 2265 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2266 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2267 return; 2268 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2269 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2270 e1000_read_phy_reg(&sc->hw, 2271 PHY_1000T_CTRL, &phy_tmp); 2272 if (phy_tmp & CR_1000T_MS_ENABLE) { 2273 phy_tmp &= ~CR_1000T_MS_ENABLE; 2274 e1000_write_phy_reg(&sc->hw, 2275 PHY_1000T_CTRL, phy_tmp); 2276 sc->smartspeed++; 2277 if (sc->hw.mac.autoneg && 2278 !e1000_phy_setup_autoneg(&sc->hw) && 2279 !e1000_read_phy_reg(&sc->hw, 2280 PHY_CONTROL, &phy_tmp)) { 2281 phy_tmp |= MII_CR_AUTO_NEG_EN | 2282 MII_CR_RESTART_AUTO_NEG; 2283 e1000_write_phy_reg(&sc->hw, 2284 PHY_CONTROL, phy_tmp); 2285 } 2286 } 2287 } 2288 return; 2289 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2290 /* If still no link, perhaps using 2/3 pair cable */ 2291 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2292 phy_tmp |= CR_1000T_MS_ENABLE; 2293 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2294 if (sc->hw.mac.autoneg && 2295 !e1000_phy_setup_autoneg(&sc->hw) && 2296 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2297 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2298 
e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2299 } 2300 } 2301 2302 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2303 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2304 sc->smartspeed = 0; 2305 } 2306 2307 static int 2308 emx_create_tx_ring(struct emx_txdata *tdata) 2309 { 2310 device_t dev = tdata->sc->dev; 2311 struct emx_txbuf *tx_buffer; 2312 int error, i, tsize, ntxd; 2313 2314 /* 2315 * Validate number of transmit descriptors. It must not exceed 2316 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2317 */ 2318 ntxd = device_getenv_int(dev, "txd", emx_txd); 2319 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2320 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2321 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2322 EMX_DEFAULT_TXD, ntxd); 2323 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2324 } else { 2325 tdata->num_tx_desc = ntxd; 2326 } 2327 2328 /* 2329 * Allocate Transmit Descriptor ring 2330 */ 2331 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2332 EMX_DBA_ALIGN); 2333 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2334 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2335 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2336 &tdata->tx_desc_paddr); 2337 if (tdata->tx_desc_base == NULL) { 2338 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2339 return ENOMEM; 2340 } 2341 2342 tsize = __VM_CACHELINE_ALIGN( 2343 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2344 tdata->tx_buf = kmalloc(tsize, M_DEVBUF, 2345 M_WAITOK | M_ZERO | M_CACHEALIGN); 2346 2347 /* 2348 * Create DMA tags for tx buffers 2349 */ 2350 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2351 1, 0, /* alignment, bounds */ 2352 BUS_SPACE_MAXADDR, /* lowaddr */ 2353 BUS_SPACE_MAXADDR, /* highaddr */ 2354 NULL, NULL, /* filter, filterarg */ 2355 EMX_TSO_SIZE, /* maxsize */ 2356 EMX_MAX_SCATTER, /* nsegments */ 2357 EMX_MAX_SEGSIZE, /* maxsegsize */ 2358 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2359 BUS_DMA_ONEBPAGE, /* flags */ 2360 &tdata->txtag); 2361 if (error) { 2362 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2363 kfree(tdata->tx_buf, M_DEVBUF); 2364 tdata->tx_buf = NULL; 2365 return error; 2366 } 2367 2368 /* 2369 * Create DMA maps for tx buffers 2370 */ 2371 for (i = 0; i < tdata->num_tx_desc; i++) { 2372 tx_buffer = &tdata->tx_buf[i]; 2373 2374 error = bus_dmamap_create(tdata->txtag, 2375 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2376 &tx_buffer->map); 2377 if (error) { 2378 device_printf(dev, "Unable to create TX DMA map\n"); 2379 emx_destroy_tx_ring(tdata, i); 2380 return error; 2381 } 2382 } 2383 2384 /* 2385 * Setup TX parameters 2386 */ 2387 tdata->spare_tx_desc = EMX_TX_SPARE; 2388 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2389 2390 /* 2391 * Keep following relationship between spare_tx_desc, oact_tx_desc 2392 * and tx_intr_nsegs: 2393 * (spare_tx_desc + EMX_TX_RESERVED) <= 2394 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2395 */ 2396 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2397 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2398 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2399 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2400 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2401 2402 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2403 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2404 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2405 2406 /* 2407 * Pullup extra 4bytes into the first data segment for TSO, see: 2408 * 82571/82572 specification 
update errata #7 2409 * 2410 * Same applies to I217 (and maybe I218 and I219). 2411 * 2412 * NOTE: 2413 * 4bytes instead of 2bytes, which are mentioned in the errata, 2414 * are pulled; mainly to keep rest of the data properly aligned. 2415 */ 2416 if (tdata->sc->hw.mac.type == e1000_82571 || 2417 tdata->sc->hw.mac.type == e1000_82572 || 2418 tdata->sc->hw.mac.type == e1000_pch_lpt || 2419 tdata->sc->hw.mac.type == e1000_pch_spt || 2420 tdata->sc->hw.mac.type == e1000_pch_cnp) 2421 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2422 2423 return (0); 2424 } 2425 2426 static void 2427 emx_init_tx_ring(struct emx_txdata *tdata) 2428 { 2429 /* Clear the old ring contents */ 2430 bzero(tdata->tx_desc_base, 2431 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2432 2433 /* Reset state */ 2434 tdata->next_avail_tx_desc = 0; 2435 tdata->next_tx_to_clean = 0; 2436 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2437 tdata->tx_nmbuf = 0; 2438 tdata->tx_running = 0; 2439 2440 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2441 if (tdata->sc->tx_ring_inuse > 1) { 2442 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2443 if (bootverbose) { 2444 if_printf(&tdata->sc->arpcom.ac_if, 2445 "TX %d force ctx setup\n", tdata->idx); 2446 } 2447 } 2448 } 2449 2450 static void 2451 emx_init_tx_unit(struct emx_softc *sc) 2452 { 2453 uint32_t tctl, tarc, tipg = 0, txdctl; 2454 int i; 2455 2456 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2457 struct emx_txdata *tdata = &sc->tx_data[i]; 2458 uint64_t bus_addr; 2459 2460 /* Setup the Base and Length of the Tx Descriptor Ring */ 2461 bus_addr = tdata->tx_desc_paddr; 2462 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2463 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2464 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2465 (uint32_t)(bus_addr >> 32)); 2466 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2467 (uint32_t)bus_addr); 2468 /* Setup the HW Tx Head and Tail descriptor pointers */ 2469 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2470 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2471 2472 txdctl = 0x1f; /* PTHRESH */ 2473 txdctl |= 1 << 8; /* HTHRESH */ 2474 txdctl |= 1 << 16; /* WTHRESH */ 2475 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ 2476 txdctl |= E1000_TXDCTL_GRAN; 2477 txdctl |= 1 << 25; /* LWTHRESH */ 2478 2479 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(i), txdctl); 2480 } 2481 2482 /* Set the default values for the Tx Inter Packet Gap timer */ 2483 switch (sc->hw.mac.type) { 2484 case e1000_80003es2lan: 2485 tipg = DEFAULT_82543_TIPG_IPGR1; 2486 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2487 E1000_TIPG_IPGR2_SHIFT; 2488 break; 2489 2490 default: 2491 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2492 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2493 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2494 else 2495 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2496 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2497 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2498 break; 2499 } 2500 2501 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2502 2503 /* NOTE: 0 is not allowed for TIDV */ 2504 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2505 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2506 2507 /* 2508 * Errata workaround (obtained from Linux). This is necessary 2509 * to make multiple TX queues work on 82574. 2510 * XXX can't find it in any published errata though. 
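 * The workaround below simply copies queue 0's TXDCTL setup onto
 * queue 1, so both TX queues run with identical prefetch and
 * write-back thresholds.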
2511 */ 2512 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0)); 2513 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl); 2514 2515 if (sc->hw.mac.type == e1000_82571 || 2516 sc->hw.mac.type == e1000_82572) { 2517 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2518 tarc |= EMX_TARC_SPEED_MODE; 2519 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2520 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2521 /* errata: program both queues to unweighted RR */ 2522 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2523 tarc |= 1; 2524 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2525 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2526 tarc |= 1; 2527 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2528 } else if (sc->hw.mac.type == e1000_82574) { 2529 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2530 tarc |= EMX_TARC_ERRATA; 2531 if (sc->tx_ring_inuse > 1) { 2532 tarc |= (EMX_TARC_COMPENSATION_MODE | EMX_TARC_MQ_FIX); 2533 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2534 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2535 } else { 2536 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2537 } 2538 } 2539 2540 /* Program the Transmit Control Register */ 2541 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2542 tctl &= ~E1000_TCTL_CT; 2543 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2544 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2545 tctl |= E1000_TCTL_MULR; 2546 2547 /* This write will effectively turn on the transmit unit. */ 2548 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2549 2550 if (sc->hw.mac.type == e1000_82571 || 2551 sc->hw.mac.type == e1000_82572 || 2552 sc->hw.mac.type == e1000_80003es2lan) { 2553 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2554 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2555 tarc &= ~(1 << 28); 2556 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2557 } else if (sc->hw.mac.type >= e1000_pch_spt) { 2558 uint32_t reg; 2559 2560 reg = E1000_READ_REG(&sc->hw, E1000_IOSFPC); 2561 reg |= E1000_RCTL_RDMTS_HEX; 2562 E1000_WRITE_REG(&sc->hw, E1000_IOSFPC, reg); 2563 reg = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2564 reg |= E1000_TARC0_CB_MULTIQ_3_REQ; 2565 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), reg); 2566 } 2567 2568 if (sc->tx_ring_inuse > 1) { 2569 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2570 tarc &= ~EMX_TARC_COUNT_MASK; 2571 tarc |= 1; 2572 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2573 2574 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2575 tarc &= ~EMX_TARC_COUNT_MASK; 2576 tarc |= 1; 2577 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2578 } 2579 } 2580 2581 static void 2582 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc) 2583 { 2584 struct emx_txbuf *tx_buffer; 2585 int i; 2586 2587 /* Free Transmit Descriptor ring */ 2588 if (tdata->tx_desc_base) { 2589 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap); 2590 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base, 2591 tdata->tx_desc_dmap); 2592 bus_dma_tag_destroy(tdata->tx_desc_dtag); 2593 2594 tdata->tx_desc_base = NULL; 2595 } 2596 2597 if (tdata->tx_buf == NULL) 2598 return; 2599 2600 for (i = 0; i < ndesc; i++) { 2601 tx_buffer = &tdata->tx_buf[i]; 2602 2603 KKASSERT(tx_buffer->m_head == NULL); 2604 bus_dmamap_destroy(tdata->txtag, tx_buffer->map); 2605 } 2606 bus_dma_tag_destroy(tdata->txtag); 2607 2608 kfree(tdata->tx_buf, M_DEVBUF); 2609 tdata->tx_buf = NULL; 2610 } 2611 2612 /* 2613 * The offload context needs to be set when we transfer the first 2614 * packet of a particular protocol (TCP/UDP). 
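A context is built as an e1000_context_desc written into the regular TX ring, so it consumes one descriptor slot just like a data descriptor.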
This routine has been
2615 * enhanced to deal with inserted VLAN headers.
2616 *
2617 * If the new packet's ether header length, ip header length and
2618 * csum offloading type are the same as the previous packet's, we
2619 * should avoid allocating a new csum context descriptor; mainly to
2620 * take advantage of the pipeline effect of the TX data read request.
2621 *
2622 * This function returns the number of TX descriptors allocated for
2623 * the csum context.
2624 */
2625 static int
2626 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
2627 uint32_t *txd_upper, uint32_t *txd_lower)
2628 {
2629 struct e1000_context_desc *TXD;
2630 int curr_txd, ehdrlen, csum_flags;
2631 uint32_t cmd, hdr_len, ip_hlen;
2632 
2633 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
2634 ip_hlen = mp->m_pkthdr.csum_iphlen;
2635 ehdrlen = mp->m_pkthdr.csum_lhlen;
2636 
2637 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
2638 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
2639 tdata->csum_flags == csum_flags) {
2640 /*
2641 * Same csum offload context as the previous packets;
2642 * just return.
2643 */
2644 *txd_upper = tdata->csum_txd_upper;
2645 *txd_lower = tdata->csum_txd_lower;
2646 return 0;
2647 }
2648 
2649 /*
2650 * Setup a new csum offload context.
2651 */
2652 
2653 curr_txd = tdata->next_avail_tx_desc;
2654 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
2655 
2656 cmd = 0;
2657 
2658 /* Setup of IP header checksum. */
2659 if (csum_flags & CSUM_IP) {
2660 /*
2661 * Start offset for header checksum calculation.
2662 * End offset for header checksum calculation.
2663 * Offset of place to put the checksum.
2664 */
2665 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2666 TXD->lower_setup.ip_fields.ipcse =
2667 htole16(ehdrlen + ip_hlen - 1);
2668 TXD->lower_setup.ip_fields.ipcso =
2669 ehdrlen + offsetof(struct ip, ip_sum);
2670 cmd |= E1000_TXD_CMD_IP;
2671 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2672 }
2673 hdr_len = ehdrlen + ip_hlen;
2674 
2675 if (csum_flags & CSUM_TCP) {
2676 /*
2677 * Start offset for payload checksum calculation.
2678 * End offset for payload checksum calculation.
2679 * Offset of place to put the checksum.
2680 */
2681 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2682 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2683 TXD->upper_setup.tcp_fields.tucso =
2684 hdr_len + offsetof(struct tcphdr, th_sum);
2685 cmd |= E1000_TXD_CMD_TCP;
2686 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2687 } else if (csum_flags & CSUM_UDP) {
2688 /*
2689 * Start offset for payload checksum calculation.
2690 * End offset for payload checksum calculation.
2691 * Offset of place to put the checksum.
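 * (A tucse of 0 tells the hardware to checksum through to the
 * end of the packet.)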
2692 */ 2693 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2694 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2695 TXD->upper_setup.tcp_fields.tucso = 2696 hdr_len + offsetof(struct udphdr, uh_sum); 2697 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2698 } 2699 2700 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2701 E1000_TXD_DTYP_D; /* Data descr */ 2702 2703 /* Save the information for this csum offloading context */ 2704 tdata->csum_lhlen = ehdrlen; 2705 tdata->csum_iphlen = ip_hlen; 2706 tdata->csum_flags = csum_flags; 2707 tdata->csum_txd_upper = *txd_upper; 2708 tdata->csum_txd_lower = *txd_lower; 2709 2710 TXD->tcp_seg_setup.data = htole32(0); 2711 TXD->cmd_and_length = 2712 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2713 2714 if (++curr_txd == tdata->num_tx_desc) 2715 curr_txd = 0; 2716 2717 KKASSERT(tdata->num_tx_desc_avail > 0); 2718 tdata->num_tx_desc_avail--; 2719 2720 tdata->next_avail_tx_desc = curr_txd; 2721 return 1; 2722 } 2723 2724 static void 2725 emx_txeof(struct emx_txdata *tdata) 2726 { 2727 struct emx_txbuf *tx_buffer; 2728 int first, num_avail; 2729 2730 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2731 return; 2732 2733 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2734 return; 2735 2736 num_avail = tdata->num_tx_desc_avail; 2737 first = tdata->next_tx_to_clean; 2738 2739 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2740 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2741 struct e1000_tx_desc *tx_desc; 2742 2743 tx_desc = &tdata->tx_desc_base[dd_idx]; 2744 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2745 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2746 2747 if (++dd_idx == tdata->num_tx_desc) 2748 dd_idx = 0; 2749 2750 while (first != dd_idx) { 2751 logif(pkt_txclean); 2752 2753 KKASSERT(num_avail < tdata->num_tx_desc); 2754 num_avail++; 2755 2756 tx_buffer = &tdata->tx_buf[first]; 2757 if (tx_buffer->m_head) 2758 emx_free_txbuf(tdata, tx_buffer); 2759 2760 if (++first == tdata->num_tx_desc) 2761 first = 0; 2762 } 2763 } else { 2764 break; 2765 } 2766 } 2767 tdata->next_tx_to_clean = first; 2768 tdata->num_tx_desc_avail = num_avail; 2769 2770 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2771 tdata->tx_dd_head = 0; 2772 tdata->tx_dd_tail = 0; 2773 } 2774 2775 if (!EMX_IS_OACTIVE(tdata)) { 2776 ifsq_clr_oactive(tdata->ifsq); 2777 2778 /* All clean, turn off the timer */ 2779 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2780 tdata->tx_watchdog.wd_timer = 0; 2781 } 2782 tdata->tx_running = EMX_TX_RUNNING; 2783 } 2784 2785 static void 2786 emx_tx_collect(struct emx_txdata *tdata, boolean_t gc) 2787 { 2788 struct emx_txbuf *tx_buffer; 2789 int tdh, first, num_avail, dd_idx = -1; 2790 2791 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2792 return; 2793 2794 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx)); 2795 if (tdh == tdata->next_tx_to_clean) { 2796 if (gc && tdata->tx_nmbuf > 0) 2797 tdata->tx_running = EMX_TX_RUNNING; 2798 return; 2799 } 2800 if (gc) 2801 tdata->tx_gc++; 2802 2803 if (tdata->tx_dd_head != tdata->tx_dd_tail) 2804 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2805 2806 num_avail = tdata->num_tx_desc_avail; 2807 first = tdata->next_tx_to_clean; 2808 2809 while (first != tdh) { 2810 logif(pkt_txclean); 2811 2812 KKASSERT(num_avail < tdata->num_tx_desc); 2813 num_avail++; 2814 2815 tx_buffer = &tdata->tx_buf[first]; 2816 if (tx_buffer->m_head) 2817 emx_free_txbuf(tdata, tx_buffer); 2818 2819 if (first == dd_idx) { 2820 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2821 if (tdata->tx_dd_head == 
tdata->tx_dd_tail) {
2822 tdata->tx_dd_head = 0;
2823 tdata->tx_dd_tail = 0;
2824 dd_idx = -1;
2825 } else {
2826 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2827 }
2828 }
2829 
2830 if (++first == tdata->num_tx_desc)
2831 first = 0;
2832 }
2833 tdata->next_tx_to_clean = first;
2834 tdata->num_tx_desc_avail = num_avail;
2835 
2836 if (!EMX_IS_OACTIVE(tdata)) {
2837 ifsq_clr_oactive(tdata->ifsq);
2838 
2839 /* All clean, turn off the timer */
2840 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2841 tdata->tx_watchdog.wd_timer = 0;
2842 }
2843 if (!gc || tdata->tx_nmbuf > 0)
2844 tdata->tx_running = EMX_TX_RUNNING;
2845 }
2846 
2847 /*
2848 * When link is lost there is sometimes work still pending in the
2849 * TX ring, which would result in a watchdog; rather than allowing
2850 * that, do an attempted cleanup here and then reinit. Note that
2851 * this has been seen mostly with fiber adapters.
2852 */
2853 static void
2854 emx_tx_purge(struct emx_softc *sc)
2855 {
2856 int i;
2857 
2858 if (sc->link_active)
2859 return;
2860 
2861 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2862 struct emx_txdata *tdata = &sc->tx_data[i];
2863 
2864 if (tdata->tx_watchdog.wd_timer) {
2865 emx_tx_collect(tdata, FALSE);
2866 if (tdata->tx_watchdog.wd_timer) {
2867 if_printf(&sc->arpcom.ac_if,
2868 "Link lost, TX pending, reinit\n");
2869 emx_init(sc);
2870 return;
2871 }
2872 }
2873 }
2874 }
2875 
2876 static int
2877 emx_newbuf(struct emx_rxdata *rdata, int i, int init)
2878 {
2879 struct mbuf *m;
2880 bus_dma_segment_t seg;
2881 bus_dmamap_t map;
2882 struct emx_rxbuf *rx_buffer;
2883 int error, nseg;
2884 
2885 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
2886 if (m == NULL) {
2887 if (init) {
2888 if_printf(&rdata->sc->arpcom.ac_if,
2889 "Unable to allocate RX mbuf\n");
2890 }
2891 return (ENOBUFS);
2892 }
2893 m->m_len = m->m_pkthdr.len = MCLBYTES;
2894 
2895 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN)
2896 m_adj(m, ETHER_ALIGN);
2897 
2898 error = bus_dmamap_load_mbuf_segment(rdata->rxtag,
2899 rdata->rx_sparemap, m,
2900 &seg, 1, &nseg, BUS_DMA_NOWAIT);
2901 if (error) {
2902 m_freem(m);
2903 if (init) {
2904 if_printf(&rdata->sc->arpcom.ac_if,
2905 "Unable to load RX mbuf\n");
2906 }
2907 return (error);
2908 }
2909 
2910 rx_buffer = &rdata->rx_buf[i];
2911 if (rx_buffer->m_head != NULL)
2912 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2913 
2914 map = rx_buffer->map;
2915 rx_buffer->map = rdata->rx_sparemap;
2916 rdata->rx_sparemap = map;
2917 
2918 rx_buffer->m_head = m;
2919 rx_buffer->paddr = seg.ds_addr;
2920 
2921 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer);
2922 return (0);
2923 }
2924 
2925 static int
2926 emx_create_rx_ring(struct emx_rxdata *rdata)
2927 {
2928 device_t dev = rdata->sc->dev;
2929 struct emx_rxbuf *rx_buffer;
2930 int i, error, rsize, nrxd;
2931 
2932 /*
2933 * Validate the number of receive descriptors. It must not exceed
2934 * the hardware maximum, and must be a multiple of EMX_DBA_ALIGN.
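 * The requested count comes from the per-device "rxd" tunable,
 * falling back to the emx_rxd global; invalid values are replaced
 * with EMX_DEFAULT_RXD below.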
2935 */ 2936 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2937 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2938 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2939 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2940 EMX_DEFAULT_RXD, nrxd); 2941 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2942 } else { 2943 rdata->num_rx_desc = nrxd; 2944 } 2945 2946 /* 2947 * Allocate Receive Descriptor ring 2948 */ 2949 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2950 EMX_DBA_ALIGN); 2951 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2952 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2953 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2954 &rdata->rx_desc_paddr); 2955 if (rdata->rx_desc == NULL) { 2956 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2957 return ENOMEM; 2958 } 2959 2960 rsize = __VM_CACHELINE_ALIGN( 2961 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2962 rdata->rx_buf = kmalloc(rsize, M_DEVBUF, 2963 M_WAITOK | M_ZERO | M_CACHEALIGN); 2964 2965 /* 2966 * Create DMA tag for rx buffers 2967 */ 2968 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2969 1, 0, /* alignment, bounds */ 2970 BUS_SPACE_MAXADDR, /* lowaddr */ 2971 BUS_SPACE_MAXADDR, /* highaddr */ 2972 NULL, NULL, /* filter, filterarg */ 2973 MCLBYTES, /* maxsize */ 2974 1, /* nsegments */ 2975 MCLBYTES, /* maxsegsize */ 2976 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2977 &rdata->rxtag); 2978 if (error) { 2979 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2980 kfree(rdata->rx_buf, M_DEVBUF); 2981 rdata->rx_buf = NULL; 2982 return error; 2983 } 2984 2985 /* 2986 * Create spare DMA map for rx buffers 2987 */ 2988 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2989 &rdata->rx_sparemap); 2990 if (error) { 2991 device_printf(dev, "Unable to create spare RX DMA map\n"); 2992 bus_dma_tag_destroy(rdata->rxtag); 2993 kfree(rdata->rx_buf, M_DEVBUF); 2994 rdata->rx_buf = NULL; 2995 return error; 2996 } 2997 2998 /* 2999 * Create DMA maps for rx buffers 3000 */ 3001 for (i = 0; i < rdata->num_rx_desc; i++) { 3002 rx_buffer = &rdata->rx_buf[i]; 3003 3004 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 3005 &rx_buffer->map); 3006 if (error) { 3007 device_printf(dev, "Unable to create RX DMA map\n"); 3008 emx_destroy_rx_ring(rdata, i); 3009 return error; 3010 } 3011 } 3012 return (0); 3013 } 3014 3015 static void 3016 emx_free_rx_ring(struct emx_rxdata *rdata) 3017 { 3018 int i; 3019 3020 for (i = 0; i < rdata->num_rx_desc; i++) { 3021 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 3022 3023 if (rx_buffer->m_head != NULL) { 3024 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 3025 m_freem(rx_buffer->m_head); 3026 rx_buffer->m_head = NULL; 3027 } 3028 } 3029 3030 if (rdata->fmp != NULL) 3031 m_freem(rdata->fmp); 3032 rdata->fmp = NULL; 3033 rdata->lmp = NULL; 3034 } 3035 3036 static void 3037 emx_free_tx_ring(struct emx_txdata *tdata) 3038 { 3039 int i; 3040 3041 for (i = 0; i < tdata->num_tx_desc; i++) { 3042 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i]; 3043 3044 if (tx_buffer->m_head != NULL) 3045 emx_free_txbuf(tdata, tx_buffer); 3046 } 3047 3048 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX; 3049 3050 tdata->csum_flags = 0; 3051 tdata->csum_lhlen = 0; 3052 tdata->csum_iphlen = 0; 3053 tdata->csum_thlen = 0; 3054 tdata->csum_mss = 0; 3055 tdata->csum_pktlen = 0; 3056 3057 tdata->tx_dd_head = 0; 3058 tdata->tx_dd_tail = 0; 3059 tdata->tx_nsegs = 0; 3060 } 3061 3062 static int 3063 emx_init_rx_ring(struct emx_rxdata *rdata) 3064 { 3065 int i, 
error;
3066 
3067 /* Reset descriptor ring */
3068 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);
3069 
3070 /* Allocate new ones. */
3071 for (i = 0; i < rdata->num_rx_desc; i++) {
3072 error = emx_newbuf(rdata, i, 1);
3073 if (error)
3074 return (error);
3075 }
3076 
3077 /* Setup our descriptor pointers */
3078 rdata->next_rx_desc_to_check = 0;
3079 
3080 return (0);
3081 }
3082 
3083 static void
3084 emx_init_rx_unit(struct emx_softc *sc)
3085 {
3086 struct ifnet *ifp = &sc->arpcom.ac_if;
3087 uint64_t bus_addr;
3088 uint32_t rctl, itr, rfctl, rxcsum;
3089 int i;
3090 
3091 /*
3092 * Make sure receives are disabled while setting
3093 * up the descriptor ring
3094 */
3095 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
3096 /* Do not disable if ever enabled on this hardware */
3097 if (sc->hw.mac.type != e1000_82574)
3098 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3099 
3100 /*
3101 * Set the interrupt throttling rate. Value is calculated
3102 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3103 */
3104 if (sc->int_throttle_ceil)
3105 itr = 1000000000 / 256 / sc->int_throttle_ceil;
3106 else
3107 itr = 0;
3108 emx_set_itr(sc, itr);
3109 
3110 /* Use extended RX descriptor */
3111 rfctl = E1000_READ_REG(&sc->hw, E1000_RFCTL);
3112 rfctl |= E1000_RFCTL_EXTEN;
3113 /* Disable accelerated acknowledge */
3114 if (sc->hw.mac.type == e1000_82574)
3115 rfctl |= E1000_RFCTL_ACK_DIS;
3116 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);
3117 
3118 /*
3119 * Receive Checksum Offload for TCP and UDP
3120 *
3121 * Checksum offloading is also enabled if multiple receive
3122 * queues are to be supported, since we need it to figure out
3123 * the packet type.
3124 */
3125 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
3126 if ((ifp->if_capenable & IFCAP_RXCSUM) ||
3127 sc->rx_ring_cnt > 1) {
3128 /*
3129 * NOTE:
3130 * PCSD must be enabled to enable multiple
3131 * receive queues.
3132 */
3133 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
3134 E1000_RXCSUM_PCSD;
3135 } else {
3136 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
3137 E1000_RXCSUM_PCSD);
3138 }
3139 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
3140 
3141 /*
3142 * Configure multiple receive queues (RSS)
3143 */
3144 if (sc->rx_ring_cnt > 1) {
3145 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
3146 int r, j;
3147 
3148 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
3149 ("invalid number of RX ring (%d)", sc->rx_ring_cnt));
3150 
3151 /*
3152 * NOTE:
3153 * When we reach here, RSS has already been disabled
3154 * in emx_stop(), so we can safely configure the RSS
3155 * key and redirect table.
3156 */
3157 
3158 /*
3159 * Configure RSS key
3160 */
3161 toeplitz_get_key(key, sizeof(key));
3162 for (i = 0; i < EMX_NRSSRK; ++i) {
3163 uint32_t rssrk;
3164 
3165 rssrk = EMX_RSSRK_VAL(key, i);
3166 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
3167 
3168 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
3169 }
3170 
3171 /*
3172 * Configure RSS redirect table.
3173 */
3174 if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table,
3175 EMX_RDRTABLE_SIZE);
3176 
3177 r = 0;
3178 for (j = 0; j < EMX_NRETA; ++j) {
3179 uint32_t reta = 0;
3180 
3181 for (i = 0; i < EMX_RETA_SIZE; ++i) {
3182 uint32_t q;
3183 
3184 q = sc->rdr_table[r] << EMX_RETA_RINGIDX_SHIFT;
3185 reta |= q << (8 * i);
3186 ++r;
3187 }
3188 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
3189 E1000_WRITE_REG(&sc->hw, E1000_RETA(j), reta);
3190 }
3191 
3192 /*
3193 * Enable multiple receive queues.
3194 * Enable IPv4 RSS standard hash functions.
3195 * Disable RSS interrupt.
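 * Only the 2-queue RSS mode is used here, which is why
 * rx_ring_cnt was asserted to be EMX_NRX_RING above.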
3196 */ 3197 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 3198 E1000_MRQC_ENABLE_RSS_2Q | 3199 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3200 E1000_MRQC_RSS_FIELD_IPV4); 3201 } 3202 3203 /* 3204 * XXX TEMPORARY WORKAROUND: on some systems with 82573 3205 * long latencies are observed, like Lenovo X60. This 3206 * change eliminates the problem, but since having positive 3207 * values in RDTR is a known source of problems on other 3208 * platforms another solution is being sought. 3209 */ 3210 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 3211 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 3212 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 3213 } 3214 3215 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3216 struct emx_rxdata *rdata = &sc->rx_data[i]; 3217 3218 /* 3219 * Setup the Base and Length of the Rx Descriptor Ring 3220 */ 3221 bus_addr = rdata->rx_desc_paddr; 3222 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 3223 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 3224 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 3225 (uint32_t)(bus_addr >> 32)); 3226 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 3227 (uint32_t)bus_addr); 3228 3229 /* 3230 * Setup the HW Rx Head and Tail Descriptor Pointers 3231 */ 3232 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 3233 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 3234 sc->rx_data[i].num_rx_desc - 1); 3235 } 3236 3237 /* Set PTHRESH for improved jumbo performance */ 3238 if (ifp->if_mtu > ETHERMTU && sc->hw.mac.type == e1000_82574) { 3239 uint32_t rxdctl; 3240 3241 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3242 rxdctl = E1000_READ_REG(&sc->hw, E1000_RXDCTL(i)); 3243 rxdctl |= 0x20; /* PTHRESH */ 3244 rxdctl |= 4 << 8; /* HTHRESH */ 3245 rxdctl |= 4 << 16; /* WTHRESH */ 3246 rxdctl |= 1 << 24; /* Switch to granularity */ 3247 E1000_WRITE_REG(&sc->hw, E1000_RXDCTL(i), rxdctl); 3248 } 3249 } 3250 3251 if (sc->hw.mac.type >= e1000_pch2lan) { 3252 if (ifp->if_mtu > ETHERMTU) 3253 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE); 3254 else 3255 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE); 3256 } 3257 3258 /* Setup the Receive Control Register */ 3259 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3260 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3261 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 3262 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3263 3264 /* Make sure VLAN Filters are off */ 3265 rctl &= ~E1000_RCTL_VFE; 3266 3267 /* Don't store bad paket */ 3268 rctl &= ~E1000_RCTL_SBP; 3269 3270 /* MCLBYTES */ 3271 rctl |= E1000_RCTL_SZ_2048; 3272 3273 if (ifp->if_mtu > ETHERMTU) 3274 rctl |= E1000_RCTL_LPE; 3275 else 3276 rctl &= ~E1000_RCTL_LPE; 3277 3278 /* Enable Receives */ 3279 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 3280 } 3281 3282 static void 3283 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc) 3284 { 3285 struct emx_rxbuf *rx_buffer; 3286 int i; 3287 3288 /* Free Receive Descriptor ring */ 3289 if (rdata->rx_desc) { 3290 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 3291 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 3292 rdata->rx_desc_dmap); 3293 bus_dma_tag_destroy(rdata->rx_desc_dtag); 3294 3295 rdata->rx_desc = NULL; 3296 } 3297 3298 if (rdata->rx_buf == NULL) 3299 return; 3300 3301 for (i = 0; i < ndesc; i++) { 3302 rx_buffer = &rdata->rx_buf[i]; 3303 3304 KKASSERT(rx_buffer->m_head == NULL); 3305 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 3306 } 3307 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 3308 bus_dma_tag_destroy(rdata->rxtag); 3309 3310 kfree(rdata->rx_buf, M_DEVBUF); 3311 rdata->rx_buf = 
NULL;
3312 }
3313 
3314 static void
3315 emx_rxeof(struct emx_rxdata *rdata, int count)
3316 {
3317 struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
3318 uint32_t staterr;
3319 emx_rxdesc_t *current_desc;
3320 struct mbuf *mp;
3321 int i, cpuid = mycpuid;
3322 
3323 i = rdata->next_rx_desc_to_check;
3324 current_desc = &rdata->rx_desc[i];
3325 staterr = le32toh(current_desc->rxd_staterr);
3326 
3327 if (!(staterr & E1000_RXD_STAT_DD))
3328 return;
3329 
3330 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
3331 struct pktinfo *pi = NULL, pi0;
3332 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
3333 struct mbuf *m = NULL;
3334 int eop, len;
3335 
3336 logif(pkt_receive);
3337 
3338 mp = rx_buf->m_head;
3339 
3340 /*
3341 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3342 * needs to access the last received byte in the mbuf.
3343 */
3344 bus_dmamap_sync(rdata->rxtag, rx_buf->map,
3345 BUS_DMASYNC_POSTREAD);
3346 
3347 len = le16toh(current_desc->rxd_length);
3348 if (staterr & E1000_RXD_STAT_EOP) {
3349 count--;
3350 eop = 1;
3351 } else {
3352 eop = 0;
3353 }
3354 
3355 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3356 uint16_t vlan = 0;
3357 uint32_t mrq, rss_hash;
3358 
3359 /*
3360 * Save several pieces of necessary information
3361 * before emx_newbuf() destroys them.
3362 */
3363 if ((staterr & E1000_RXD_STAT_VP) && eop)
3364 vlan = le16toh(current_desc->rxd_vlan);
3365 
3366 mrq = le32toh(current_desc->rxd_mrq);
3367 rss_hash = le32toh(current_desc->rxd_rss);
3368 
3369 EMX_RSS_DPRINTF(rdata->sc, 10,
3370 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
3371 rdata->idx, mrq, rss_hash);
3372 
3373 if (emx_newbuf(rdata, i, 0) != 0) {
3374 IFNET_STAT_INC(ifp, iqdrops, 1);
3375 goto discard;
3376 }
3377 
3378 /* Assign correct length to the current fragment */
3379 mp->m_len = len;
3380 
3381 if (rdata->fmp == NULL) {
3382 mp->m_pkthdr.len = len;
3383 rdata->fmp = mp; /* Store the first mbuf */
3384 rdata->lmp = mp;
3385 } else {
3386 /*
3387 * Chain mbufs together
3388 */
3389 rdata->lmp->m_next = mp;
3390 rdata->lmp = rdata->lmp->m_next;
3391 rdata->fmp->m_pkthdr.len += len;
3392 }
3393 
3394 if (eop) {
3395 rdata->fmp->m_pkthdr.rcvif = ifp;
3396 IFNET_STAT_INC(ifp, ipackets, 1);
3397 
3398 if (ifp->if_capenable & IFCAP_RXCSUM)
3399 emx_rxcsum(staterr, rdata->fmp);
3400 
3401 if (staterr & E1000_RXD_STAT_VP) {
3402 rdata->fmp->m_pkthdr.ether_vlantag =
3403 vlan;
3404 rdata->fmp->m_flags |= M_VLANTAG;
3405 }
3406 m = rdata->fmp;
3407 rdata->fmp = NULL;
3408 rdata->lmp = NULL;
3409 
3410 if (ifp->if_capenable & IFCAP_RSS) {
3411 pi = emx_rssinfo(m, &pi0, mrq,
3412 rss_hash, staterr);
3413 }
3414 #ifdef EMX_RSS_DEBUG
3415 rdata->rx_pkts++;
3416 #endif
3417 }
3418 } else {
3419 IFNET_STAT_INC(ifp, ierrors, 1);
3420 discard:
3421 emx_setup_rxdesc(current_desc, rx_buf);
3422 if (rdata->fmp != NULL) {
3423 m_freem(rdata->fmp);
3424 rdata->fmp = NULL;
3425 rdata->lmp = NULL;
3426 }
3427 m = NULL;
3428 }
3429 
3430 if (m != NULL)
3431 ifp->if_input(ifp, m, pi, cpuid);
3432 
3433 /* Advance our pointers to the next descriptor. */
3434 if (++i == rdata->num_rx_desc)
3435 i = 0;
3436 
3437 current_desc = &rdata->rx_desc[i];
3438 staterr = le32toh(current_desc->rxd_staterr);
3439 }
3440 rdata->next_rx_desc_to_check = i;
3441 
3442 /* Advance the E1000's Receive Queue "Tail Pointer".
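RDT is left one entry behind next_rx_desc_to_check, i.e. at the last descriptor the driver has refilled, so the hardware stops before any descriptor that has not been processed and refilled yet.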
*/ 3443 if (--i < 0) 3444 i = rdata->num_rx_desc - 1; 3445 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3446 } 3447 3448 static void 3449 emx_enable_intr(struct emx_softc *sc) 3450 { 3451 uint32_t ims_mask = IMS_ENABLE_MASK; 3452 3453 lwkt_serialize_handler_enable(&sc->main_serialize); 3454 3455 #if 0 3456 if (sc->hw.mac.type == e1000_82574) { 3457 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3458 ims_mask |= EM_MSIX_MASK; 3459 } 3460 #endif 3461 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3462 } 3463 3464 static void 3465 emx_disable_intr(struct emx_softc *sc) 3466 { 3467 if (sc->hw.mac.type == e1000_82574) 3468 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3469 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3470 3471 lwkt_serialize_handler_disable(&sc->main_serialize); 3472 } 3473 3474 /* 3475 * Bit of a misnomer, what this really means is 3476 * to enable OS management of the system... aka 3477 * to disable special hardware management features 3478 */ 3479 static void 3480 emx_get_mgmt(struct emx_softc *sc) 3481 { 3482 /* A shared code workaround */ 3483 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3484 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3485 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3486 3487 /* disable hardware interception of ARP */ 3488 manc &= ~(E1000_MANC_ARP_EN); 3489 3490 /* enable receiving management packets to the host */ 3491 manc |= E1000_MANC_EN_MNG2HOST; 3492 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3493 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3494 manc2h |= E1000_MNG2HOST_PORT_623; 3495 manc2h |= E1000_MNG2HOST_PORT_664; 3496 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3497 3498 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3499 } 3500 } 3501 3502 /* 3503 * Give control back to hardware management 3504 * controller if there is one. 3505 */ 3506 static void 3507 emx_rel_mgmt(struct emx_softc *sc) 3508 { 3509 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3510 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3511 3512 /* re-enable hardware interception of ARP */ 3513 manc |= E1000_MANC_ARP_EN; 3514 manc &= ~E1000_MANC_EN_MNG2HOST; 3515 3516 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3517 } 3518 } 3519 3520 /* 3521 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3522 * For ASF and Pass Through versions of f/w this means that 3523 * the driver is loaded. For AMT version (only with 82573) 3524 * of the f/w this means that the network i/f is open. 3525 */ 3526 static void 3527 emx_get_hw_control(struct emx_softc *sc) 3528 { 3529 /* Let firmware know the driver has taken over */ 3530 if (sc->hw.mac.type == e1000_82573) { 3531 uint32_t swsm; 3532 3533 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3534 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3535 swsm | E1000_SWSM_DRV_LOAD); 3536 } else { 3537 uint32_t ctrl_ext; 3538 3539 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3540 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3541 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3542 } 3543 sc->flags |= EMX_FLAG_HW_CTRL; 3544 } 3545 3546 /* 3547 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3548 * For ASF and Pass Through versions of f/w this means that the 3549 * driver is no longer loaded. For AMT version (only with 82573) 3550 * of the f/w this means that the network i/f is closed. 
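 * This is the inverse of emx_get_hw_control(); the
 * EMX_FLAG_HW_CTRL check below keeps the release idempotent.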
3551 */
3552 static void
3553 emx_rel_hw_control(struct emx_softc *sc)
3554 {
3555 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
3556 return;
3557 sc->flags &= ~EMX_FLAG_HW_CTRL;
3558 
3559 /* Let firmware take over control of the hardware */
3560 if (sc->hw.mac.type == e1000_82573) {
3561 uint32_t swsm;
3562 
3563 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3564 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3565 swsm & ~E1000_SWSM_DRV_LOAD);
3566 } else {
3567 uint32_t ctrl_ext;
3568 
3569 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3570 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3571 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3572 }
3573 }
3574 
3575 static int
3576 emx_is_valid_eaddr(const uint8_t *addr)
3577 {
3578 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3579 
3580 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3581 return (FALSE);
3582 
3583 return (TRUE);
3584 }
3585 
3586 /*
3587 * Enable PCI Wake On LAN capability
3588 */
3589 static void
3590 emx_enable_wol(device_t dev)
3591 {
3592 uint16_t cap, status;
3593 uint8_t id;
3594 
3595 /* First find the capabilities pointer */
3596 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3597 
3598 /* Read the PM Capabilities */
3599 id = pci_read_config(dev, cap, 1);
3600 if (id != PCIY_PMG) /* Something wrong */
3601 return;
3602 
3603 /*
3604 * OK, we have the power capabilities,
3605 * so now get the status register
3606 */
3607 cap += PCIR_POWER_STATUS;
3608 status = pci_read_config(dev, cap, 2);
3609 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3610 pci_write_config(dev, cap, status, 2);
3611 }
3612 
3613 static void
3614 emx_update_stats(struct emx_softc *sc)
3615 {
3616 struct ifnet *ifp = &sc->arpcom.ac_if;
3617 
3618 if (sc->hw.phy.media_type == e1000_media_type_copper ||
3619 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3620 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
3621 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
3622 }
3623 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
3624 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
3625 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
3626 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
3627 
3628 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
3629 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
3630 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
3631 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
3632 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
3633 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
3634 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
3635 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
3636 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
3637 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
3638 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
3639 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
3640 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
3641 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
3642 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
3643 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
3644 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
3645 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
3646 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
3647 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
3648 
3649 /* For the 64-bit byte counters the low dword must be read first.
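(XXX only the high dwords, e.g. GORCH/GOTCH and TORH/TOTH, are read below; the low dwords are never accumulated.)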
*/ 3650 /* Both registers clear on the read of the high dword */ 3651 3652 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3653 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3654 3655 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3656 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3657 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3658 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3659 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3660 3661 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3662 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3663 3664 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3665 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3666 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3667 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3668 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3669 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3670 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3671 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3672 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3673 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3674 3675 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3676 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3677 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3678 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3679 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3680 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3681 3682 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3683 3684 /* Rx Errors */ 3685 IFNET_STAT_SET(ifp, ierrors, 3686 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3687 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3688 3689 /* Tx Errors */ 3690 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3691 } 3692 3693 static void 3694 emx_print_debug_info(struct emx_softc *sc) 3695 { 3696 device_t dev = sc->dev; 3697 uint8_t *hw_addr = sc->hw.hw_addr; 3698 int i; 3699 3700 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3701 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3702 E1000_READ_REG(&sc->hw, E1000_CTRL), 3703 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3704 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3705 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3706 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3707 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3708 sc->hw.fc.high_water, sc->hw.fc.low_water); 3709 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3710 E1000_READ_REG(&sc->hw, E1000_TIDV), 3711 E1000_READ_REG(&sc->hw, E1000_TADV)); 3712 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3713 E1000_READ_REG(&sc->hw, E1000_RDTR), 3714 E1000_READ_REG(&sc->hw, E1000_RADV)); 3715 3716 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3717 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3718 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3719 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3720 } 3721 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3722 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3723 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3724 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3725 } 3726 3727 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3728 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3729 sc->tx_data[i].num_tx_desc_avail); 3730 
device_printf(dev, "TX %d TSO segments = %lu\n", i, 3731 sc->tx_data[i].tso_segments); 3732 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3733 sc->tx_data[i].tso_ctx_reused); 3734 } 3735 } 3736 3737 static void 3738 emx_print_hw_stats(struct emx_softc *sc) 3739 { 3740 device_t dev = sc->dev; 3741 3742 device_printf(dev, "Excessive collisions = %lld\n", 3743 (long long)sc->stats.ecol); 3744 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3745 device_printf(dev, "Symbol errors = %lld\n", 3746 (long long)sc->stats.symerrs); 3747 #endif 3748 device_printf(dev, "Sequence errors = %lld\n", 3749 (long long)sc->stats.sec); 3750 device_printf(dev, "Defer count = %lld\n", 3751 (long long)sc->stats.dc); 3752 device_printf(dev, "Missed Packets = %lld\n", 3753 (long long)sc->stats.mpc); 3754 device_printf(dev, "Receive No Buffers = %lld\n", 3755 (long long)sc->stats.rnbc); 3756 /* RLEC is inaccurate on some hardware, calculate our own. */ 3757 device_printf(dev, "Receive Length Errors = %lld\n", 3758 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3759 device_printf(dev, "Receive errors = %lld\n", 3760 (long long)sc->stats.rxerrc); 3761 device_printf(dev, "Crc errors = %lld\n", 3762 (long long)sc->stats.crcerrs); 3763 device_printf(dev, "Alignment errors = %lld\n", 3764 (long long)sc->stats.algnerrc); 3765 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3766 (long long)sc->stats.cexterr); 3767 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3768 device_printf(dev, "XON Rcvd = %lld\n", 3769 (long long)sc->stats.xonrxc); 3770 device_printf(dev, "XON Xmtd = %lld\n", 3771 (long long)sc->stats.xontxc); 3772 device_printf(dev, "XOFF Rcvd = %lld\n", 3773 (long long)sc->stats.xoffrxc); 3774 device_printf(dev, "XOFF Xmtd = %lld\n", 3775 (long long)sc->stats.xofftxc); 3776 device_printf(dev, "Good Packets Rcvd = %lld\n", 3777 (long long)sc->stats.gprc); 3778 device_printf(dev, "Good Packets Xmtd = %lld\n", 3779 (long long)sc->stats.gptc); 3780 } 3781 3782 static void 3783 emx_print_nvm_info(struct emx_softc *sc) 3784 { 3785 uint16_t eeprom_data; 3786 int i, j, row = 0; 3787 3788 /* Its a bit crude, but it gets the job done */ 3789 kprintf("\nInterface EEPROM Dump:\n"); 3790 kprintf("Offset\n0x0000 "); 3791 for (i = 0, j = 0; i < 32; i++, j++) { 3792 if (j == 8) { /* Make the offset block */ 3793 j = 0; ++row; 3794 kprintf("\n0x00%x0 ",row); 3795 } 3796 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3797 kprintf("%04x ", eeprom_data); 3798 } 3799 kprintf("\n"); 3800 } 3801 3802 static int 3803 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3804 { 3805 struct emx_softc *sc; 3806 struct ifnet *ifp; 3807 int error, result; 3808 3809 result = -1; 3810 error = sysctl_handle_int(oidp, &result, 0, req); 3811 if (error || !req->newptr) 3812 return (error); 3813 3814 sc = (struct emx_softc *)arg1; 3815 ifp = &sc->arpcom.ac_if; 3816 3817 ifnet_serialize_all(ifp); 3818 3819 if (result == 1) 3820 emx_print_debug_info(sc); 3821 3822 /* 3823 * This value will cause a hex dump of the 3824 * first 32 16-bit words of the EEPROM to 3825 * the screen. 
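 * E.g. (assuming the first adapter) "sysctl dev.emx.0.debug=2"
 * triggers the dump, while a value of 1 prints the debug
 * information above.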
static int
emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc;
	struct ifnet *ifp;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	sc = (struct emx_softc *)arg1;
	ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	if (result == 1)
		emx_print_debug_info(sc);

	/*
	 * This value will cause a hex dump of the
	 * first 32 16-bit words of the EEPROM to
	 * the screen.
	 */
	if (result == 2)
		emx_print_nvm_info(sc);

	ifnet_deserialize_all(ifp);

	return (error);
}

static int
emx_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		struct emx_softc *sc = (struct emx_softc *)arg1;
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		emx_print_hw_stats(sc);
		ifnet_deserialize_all(ifp);
	}
	return (error);
}

static void
emx_add_sysctl(struct emx_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char pkt_desc[32];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0,
	    "# of TX descs");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_int_throttle, "I", "interrupt throttling rate");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_wreg_nsegs, "I",
	    "# segments sent before write to hardware register");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0,
	    "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0,
	    "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "TX polling CPU map");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "RX polling CPU map");
#endif

#ifdef EMX_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug,
	    0, "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts,
		    "RXed packets");
	}
#endif
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
#ifdef EMX_TSS_DEBUG
		ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts,
		    "TXed packets");
#endif	/* EMX_TSS_DEBUG */
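
		/*
		 * Always-available per-ring TX bookkeeping: the number of
		 * TX mbufs still pending reclaim and the number of TX
		 * descriptor garbage-collection passes.
		 */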
"TXed packets"); 3930 #endif 3931 3932 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_nmbuf", i); 3933 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3934 pkt_desc, CTLFLAG_RD, &sc->tx_data[i].tx_nmbuf, 0, 3935 "# of pending TX mbufs"); 3936 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_gc", i); 3937 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3938 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_gc, 3939 "# of TX desc GC"); 3940 } 3941 } 3942 3943 static int 3944 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3945 { 3946 struct emx_softc *sc = (void *)arg1; 3947 struct ifnet *ifp = &sc->arpcom.ac_if; 3948 int error, throttle; 3949 3950 throttle = sc->int_throttle_ceil; 3951 error = sysctl_handle_int(oidp, &throttle, 0, req); 3952 if (error || req->newptr == NULL) 3953 return error; 3954 if (throttle < 0 || throttle > 1000000000 / 256) 3955 return EINVAL; 3956 3957 if (throttle) { 3958 /* 3959 * Set the interrupt throttling rate in 256ns increments, 3960 * recalculate sysctl value assignment to get exact frequency. 3961 */ 3962 throttle = 1000000000 / 256 / throttle; 3963 3964 /* Upper 16bits of ITR is reserved and should be zero */ 3965 if (throttle & 0xffff0000) 3966 return EINVAL; 3967 } 3968 3969 ifnet_serialize_all(ifp); 3970 3971 if (throttle) 3972 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3973 else 3974 sc->int_throttle_ceil = 0; 3975 3976 if (ifp->if_flags & IFF_RUNNING) 3977 emx_set_itr(sc, throttle); 3978 3979 ifnet_deserialize_all(ifp); 3980 3981 if (bootverbose) { 3982 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3983 sc->int_throttle_ceil); 3984 } 3985 return 0; 3986 } 3987 3988 static int 3989 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3990 { 3991 struct emx_softc *sc = (void *)arg1; 3992 struct ifnet *ifp = &sc->arpcom.ac_if; 3993 struct emx_txdata *tdata = &sc->tx_data[0]; 3994 int error, segs; 3995 3996 segs = tdata->tx_intr_nsegs; 3997 error = sysctl_handle_int(oidp, &segs, 0, req); 3998 if (error || req->newptr == NULL) 3999 return error; 4000 if (segs <= 0) 4001 return EINVAL; 4002 4003 ifnet_serialize_all(ifp); 4004 4005 /* 4006 * Don't allow tx_intr_nsegs to become: 4007 * o Less the oact_tx_desc 4008 * o Too large that no TX desc will cause TX interrupt to 4009 * be generated (OACTIVE will never recover) 4010 * o Too small that will cause tx_dd[] overflow 4011 */ 4012 if (segs < tdata->oact_tx_desc || 4013 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 4014 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 4015 error = EINVAL; 4016 } else { 4017 int i; 4018 4019 error = 0; 4020 for (i = 0; i < sc->tx_ring_cnt; ++i) 4021 sc->tx_data[i].tx_intr_nsegs = segs; 4022 } 4023 4024 ifnet_deserialize_all(ifp); 4025 4026 return error; 4027 } 4028 4029 static int 4030 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 4031 { 4032 struct emx_softc *sc = (void *)arg1; 4033 struct ifnet *ifp = &sc->arpcom.ac_if; 4034 int error, nsegs, i; 4035 4036 nsegs = sc->tx_data[0].tx_wreg_nsegs; 4037 error = sysctl_handle_int(oidp, &nsegs, 0, req); 4038 if (error || req->newptr == NULL) 4039 return error; 4040 4041 ifnet_serialize_all(ifp); 4042 for (i = 0; i < sc->tx_ring_cnt; ++i) 4043 sc->tx_data[i].tx_wreg_nsegs =nsegs; 4044 ifnet_deserialize_all(ifp); 4045 4046 return 0; 4047 } 4048 4049 static int 4050 emx_dma_alloc(struct emx_softc *sc) 4051 { 4052 int error, i; 4053 4054 /* 4055 * Create top level busdma tag 4056 */ 4057 error = bus_dma_tag_create(NULL, 1, 0, 4058 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4059 NULL, NULL, 4060 BUS_SPACE_MAXSIZE_32BIT, 0, 
static int
emx_dma_alloc(struct emx_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_dtag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate transmit descriptors ring and buffers
	 */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		error = emx_create_tx_ring(&sc->tx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup transmit structures\n");
			return error;
		}
	}

	/*
	 * Allocate receive descriptors ring and buffers
	 */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		error = emx_create_rx_ring(&sc->rx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup receive structures\n");
			return error;
		}
	}
	return 0;
}

static void
emx_dma_free(struct emx_softc *sc)
{
	int i;

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		emx_destroy_tx_ring(&sc->tx_data[i],
		    sc->tx_data[i].num_tx_desc);
	}

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		emx_destroy_rx_ring(&sc->rx_data[i],
		    sc->rx_data[i].num_rx_desc);
	}

	/* Free top level busdma tag */
	if (sc->parent_dtag != NULL)
		bus_dma_tag_destroy(sc->parent_dtag);
}

static void
emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
}

static int
emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_serialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
}

static void
emx_deserialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
}

#ifdef INVARIANTS

static void
emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
	    slz, serialized);
}

#endif	/* INVARIANTS */

#ifdef IFPOLL_ENABLE

static void
emx_npoll_status(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);
		callout_reset(&sc->timer, hz, emx_timer, sc);
	}
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct emx_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->tx_serialize);

	emx_tx_intr(tdata);
	emx_try_txgc(tdata, 1);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct emx_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->rx_serialize);

	emx_rxeof(rdata, cycle);
}
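
/*
 * Summary of the logic below: when polling is being enabled
 * (info != NULL), emx_npoll() registers one TX and one RX handler per
 * active ring, indexed by the CPU that if_ringmap assigned to that
 * ring; when it is being disabled (info == NULL), the TX queues are
 * re-bound to the interrupt CPU.
 */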
static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct emx_softc *sc = ifp->if_softc;
	int i, txr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int cpu;

		info->ifpi_status.status_func = emx_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = emx_get_txring_inuse(sc, TRUE);
		for (i = 0; i < txr_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_tx[cpu].poll_func = emx_npoll_tx;
			info->ifpi_tx[cpu].arg = tdata;
			info->ifpi_tx[cpu].serializer = &tdata->tx_serialize;
			ifsq_set_cpuid(tdata->ifsq, cpu);
		}

		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct emx_rxdata *rdata = &sc->rx_data[i];

			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_rx[cpu].poll_func = emx_npoll_rx;
			info->ifpi_rx[cpu].arg = rdata;
			info->ifpi_rx[cpu].serializer = &rdata->rx_serialize;
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			ifsq_set_cpuid(tdata->ifsq,
			    rman_get_cpuid(sc->intr_res));
		}
	}
	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);
}

#endif	/* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
	E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
	if (sc->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When using MSI-X interrupts we need to
		 * throttle using the EITR register
		 */
		for (i = 0; i < 4; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
	}
}

/*
 * Disable the L0s, 82574L Errata #20
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = sc->dev;

	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8 disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
		/*
		 * 82574 specification update errata #20
		 *
		 * There is no need to disable L1
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}

static int
emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
{
	int iphlen, hoff, thoff, ex = 0;
	struct mbuf *m;
	struct ip *ip;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
		ex = 4;
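
	/*
	 * The TSO context setup in emx_tso_setup() dereferences the IP
	 * header fields directly, so make sure the complete header block
	 * (plus the 4 extra bytes when the ring is flagged
	 * EMX_TXFLAG_TSO_PULLEX) is contiguous in the first mbuf.
	 */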
tcp hlen")); 4340 KASSERT(hoff > 0, ("invalid ether hlen")); 4341 4342 if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX) 4343 ex = 4; 4344 4345 if (m->m_len < hoff + iphlen + thoff + ex) { 4346 m = m_pullup(m, hoff + iphlen + thoff + ex); 4347 if (m == NULL) { 4348 *mp = NULL; 4349 return ENOBUFS; 4350 } 4351 *mp = m; 4352 } 4353 ip = mtodoff(m, struct ip *, hoff); 4354 ip->ip_len = 0; 4355 4356 return 0; 4357 } 4358 4359 static int 4360 emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp, 4361 uint32_t *txd_upper, uint32_t *txd_lower) 4362 { 4363 struct e1000_context_desc *TXD; 4364 int hoff, iphlen, thoff, hlen; 4365 int mss, pktlen, curr_txd; 4366 4367 #ifdef EMX_TSO_DEBUG 4368 tdata->tso_segments++; 4369 #endif 4370 4371 iphlen = mp->m_pkthdr.csum_iphlen; 4372 thoff = mp->m_pkthdr.csum_thlen; 4373 hoff = mp->m_pkthdr.csum_lhlen; 4374 mss = mp->m_pkthdr.tso_segsz; 4375 pktlen = mp->m_pkthdr.len; 4376 4377 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 4378 tdata->csum_flags == CSUM_TSO && 4379 tdata->csum_iphlen == iphlen && 4380 tdata->csum_lhlen == hoff && 4381 tdata->csum_thlen == thoff && 4382 tdata->csum_mss == mss && 4383 tdata->csum_pktlen == pktlen) { 4384 *txd_upper = tdata->csum_txd_upper; 4385 *txd_lower = tdata->csum_txd_lower; 4386 #ifdef EMX_TSO_DEBUG 4387 tdata->tso_ctx_reused++; 4388 #endif 4389 return 0; 4390 } 4391 hlen = hoff + iphlen + thoff; 4392 4393 /* 4394 * Setup a new TSO context. 4395 */ 4396 4397 curr_txd = tdata->next_avail_tx_desc; 4398 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 4399 4400 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 4401 E1000_TXD_DTYP_D | /* Data descr type */ 4402 E1000_TXD_CMD_TSE; /* Do TSE on this packet */ 4403 4404 /* IP and/or TCP header checksum calculation and insertion. */ 4405 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 4406 4407 /* 4408 * Start offset for header checksum calculation. 4409 * End offset for header checksum calculation. 4410 * Offset of place put the checksum. 4411 */ 4412 TXD->lower_setup.ip_fields.ipcss = hoff; 4413 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1); 4414 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum); 4415 4416 /* 4417 * Start offset for payload checksum calculation. 4418 * End offset for payload checksum calculation. 4419 * Offset of place to put the checksum. 4420 */ 4421 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen; 4422 TXD->upper_setup.tcp_fields.tucse = 0; 4423 TXD->upper_setup.tcp_fields.tucso = 4424 hoff + iphlen + offsetof(struct tcphdr, th_sum); 4425 4426 /* 4427 * Payload size per packet w/o any headers. 4428 * Length of all headers up to payload. 
	TXD->tcp_seg_setup.fields.mss = htole16(mss);
	TXD->tcp_seg_setup.fields.hdr_len = hlen;
	TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
	    E1000_TXD_CMD_DEXT |	/* Extended descr */
	    E1000_TXD_CMD_TSE |		/* TSE context */
	    E1000_TXD_CMD_IP |		/* Do IP csum */
	    E1000_TXD_CMD_TCP |		/* Do TCP checksum */
	    (pktlen - hlen));		/* Total len */

	/* Save the information for this TSO context */
	tdata->csum_flags = CSUM_TSO;
	tdata->csum_lhlen = hoff;
	tdata->csum_iphlen = iphlen;
	tdata->csum_thlen = thoff;
	tdata->csum_mss = mss;
	tdata->csum_pktlen = pktlen;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}

static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
	if (polling)
		return sc->tx_ring_cnt;
	else
		return 1;
}

/*
 * Remove all descriptors from the TX ring.
 *
 * We want to clear all pending descriptors from the TX ring; zeroing
 * happens when the HW reads the regs.  We assign the ring itself as
 * the data of the next descriptor; its contents do not matter since
 * we are about to reset the HW anyway.
 */
static void
emx_flush_tx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);

	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];
		struct e1000_tx_desc *txd;

		if (E1000_READ_REG(hw, E1000_TDLEN(i)) == 0)
			continue;

		txd = &tdata->tx_desc_base[tdata->next_avail_tx_desc++];
		if (tdata->next_avail_tx_desc == tdata->num_tx_desc)
			tdata->next_avail_tx_desc = 0;

		/* Just use the ring as a dummy buffer addr */
		txd->buffer_addr = tdata->tx_desc_paddr;
		txd->lower.data = htole32(E1000_TXD_CMD_IFCS | 512);
		txd->upper.data = 0;

		E1000_WRITE_REG(hw, E1000_TDT(i), tdata->next_avail_tx_desc);
		usec_delay(250);
	}
}

/*
 * Remove all descriptors from the RX rings.
 *
 * Mark all descriptors in the RX rings as consumed and disable the RX rings.
 */
static void
emx_flush_rx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t rctl;
	int i;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	usec_delay(150);

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		uint32_t rxdctl;

		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		/* Zero the lower 14 bits (prefetch and host thresholds) */
		rxdctl &= 0xffffc000;
		/*
		 * Update thresholds: prefetch threshold to 31, host threshold
		 * to 1 and make sure the granularity is "descriptors" and not
		 * "cache lines".
		 */
		rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	/* Momentarily enable the RX rings for the changes to take effect */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	usec_delay(150);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
}

/*
 * Remove all descriptors from the descriptor rings.
 *
 * On the i219, the descriptor rings must be emptied before resetting
 * the HW or before changing the device state to D3 during runtime
 * (runtime PM).
 *
 * Failure to do this will cause the HW to enter a unit hang state
 * which can only be released by a PCI reset of the device.
 */
static void
emx_flush_txrx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	uint16_t hang_state;
	uint32_t fext_nvm11, tdlen;
	int i;

	/*
	 * First, disable MULR fix in FEXTNVM11.
	 */
	fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
	fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
	E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);

	/*
	 * Do nothing if we're not in a faulty state, or if the queue is
	 * empty.
	 */
	tdlen = 0;
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		tdlen += E1000_READ_REG(hw, E1000_TDLEN(i));
	hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
	if ((hang_state & EMX_FLUSH_DESC_REQUIRED) && tdlen)
		emx_flush_tx_ring(sc);

	/*
	 * Recheck; maybe the fault is caused by the RX ring.
	 */
	hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
	if (hang_state & EMX_FLUSH_DESC_REQUIRED)
		emx_flush_rx_ring(sc);
}
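
/*
 * A note on usage (an assumption based on how the equivalent flush
 * logic is wired up in related e1000 drivers): emx_flush_txrx_ring()
 * is expected to be called from the reset path, before the HW reset,
 * when sc->hw.mac.type indicates an SPT-based (i219) MAC.
 */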