/*
 * Copyright (c) 2004
 *	Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/re/if_re.c,v 1.25 2004/06/09 14:34:01 naddy Exp $
 */

/*
 * RealTek 8169S/8110S/8168/8111/8101E/8125 PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers.  There are currently
 * seven devices in this family: the RTL8169, the RTL8169S, RTL8110S,
 * the RTL8168, the RTL8111, the RTL8101E and the RTL8125.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC:
 *
 * o Descriptor based DMA mechanism.  Each descriptor represents
 *   a single packet fragment.  Data buffers may be aligned on
 *   any byte boundary.
 *
 * o 64-bit DMA.
 *
 * o TCP/IP checksum offload for both RX and TX.
 *
 * o High and normal priority transmit DMA rings.
 *
 * o VLAN tag insertion and extraction.
 *
 * o TCP large send (segmentation offload).
 *
 * o 1000Mbps mode.
 *
 * o Jumbo frames.
 *
 * o GMII and TBI ports/registers for interfacing with copper
 *   or fiber PHYs.
 *
 * o RX and TX DMA rings can have up to 1024 descriptors.
 *
 * The 8169 does not have a built-in PHY.  Most reference boards use a
 * Marvell 88E1000 'Alaska' copper gigE PHY.  8169/8110 is _no longer_
 * supported.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip').  These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY.  The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 * 8125 supports 10/100/1000/2500.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features.
 * It also implements
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces interrupt load.
 */

#define _IP_VHL

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>

#include "pcidevs.h"
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/re/if_rereg.h>
#include <dev/netif/re/if_revar.h>
#include <dev/netif/re/re.h>
#include <dev/netif/re/re_dragonfly.h>

/*
 * Various supported device vendors/types and their names.
 * The probe routine matches the PCI (vendor, device) pair against
 * this table; the list is terminated by the all-zero sentinel entry.
 */
static const struct re_type {
	uint16_t	re_vid;		/* PCI vendor ID */
	uint16_t	re_did;		/* PCI device ID */
	const char	*re_name;	/* device description string */
} re_devs[] = {
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE528T,
	  "D-Link DGE-528(T) Gigabit Ethernet Adapter" },

	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8101E,
	  "RealTek 810x PCIe 10/100baseTX" },

	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168,
	  "RealTek 8111/8168 PCIe Gigabit Ethernet" },

	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168_1,
	  "RealTek 8168 PCIe Gigabit Ethernet" },

	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125,
	  "RealTek 8125 PCIe Gigabit Ethernet" },

#ifdef notyet
	/*
	 * This driver now only supports built-in PHYs.
	 */
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169,
	  "RealTek 8110/8169 Gigabit Ethernet" },
#endif

	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169SC,
	  "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },

	{ PCI_VENDOR_COREGA, PCI_PRODUCT_COREGA_CG_LAPCIGT,
	  "Corega CG-LAPCIGT Gigabit Ethernet" },

	{ PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032,
	  "Linksys EG1032 Gigabit Ethernet" },

	{ PCI_VENDOR_USR2, PCI_PRODUCT_USR2_997902,
	  "US Robotics 997902 Gigabit Ethernet" },

	{ PCI_VENDOR_TTTECH, PCI_PRODUCT_TTTECH_MC322,
	  "TTTech MC322 Gigabit Ethernet" },

	{ 0, 0, NULL }		/* sentinel: end of table */
};

static int	re_probe(device_t);
static int	re_attach(device_t);
static int	re_detach(device_t);
static int	re_suspend(device_t);
static int	re_resume(device_t);
static void	re_shutdown(device_t);

static int	re_allocmem(device_t);
static void	re_freemem(device_t);
static void	re_freebufmem(struct re_softc *, int, int);
static int	re_encap(struct re_softc *, struct mbuf **, int *);
static int	re_newbuf_std(struct re_softc *, int, int);
#ifdef RE_JUMBO
static int	re_newbuf_jumbo(struct re_softc *, int, int);
#endif
static void	re_setup_rxdesc(struct re_softc *, int);
static int	re_rx_list_init(struct re_softc *);
static int	re_tx_list_init(struct re_softc *);
static int	re_rxeof(struct re_softc *);
static int	re_txeof(struct re_softc *);
static int	re_tx_collect(struct re_softc *);
static void	re_intr(void *);
static void	re_tick(void *);
static void	re_tick_serialized(void *);
static void	re_disable_aspm(device_t);
static void	re_link_up(struct re_softc *);
static void	re_link_down(struct re_softc *);

/* Register accessors; the 8125 variants use a different register layout. */
static void	re_start_xmit(struct re_softc *);
static void	re_write_imr(struct re_softc *, uint32_t);
static void	re_write_isr(struct re_softc *, uint32_t);
static uint32_t	re_read_isr(struct re_softc *);
static void	re_start_xmit_8125(struct re_softc *);
static void	re_write_imr_8125(struct re_softc *, uint32_t);
static void	re_write_isr_8125(struct re_softc *, uint32_t);
static uint32_t	re_read_isr_8125(struct re_softc *);

static void	re_start(struct ifnet *, struct ifaltq_subque *);
static int	re_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	re_init(void *);
static void	re_stop(struct re_softc *, boolean_t);
static void	re_watchdog(struct ifnet *);

/* Interrupt moderation setup (hardware and simulated flavors). */
static void	re_setup_hw_im(struct re_softc *);
static void	re_setup_sim_im(struct re_softc *);
static void	re_disable_hw_im(struct re_softc *);
static void	re_disable_sim_im(struct re_softc *);
static void	re_config_imtype(struct re_softc *, int);
static void	re_setup_intr(struct re_softc *, int, int);

static int	re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *);
static int	re_sysctl_rxtime(SYSCTL_HANDLER_ARGS);
static int	re_sysctl_txtime(SYSCTL_HANDLER_ARGS);
static int	re_sysctl_simtime(SYSCTL_HANDLER_ARGS);
static int	re_sysctl_imtype(SYSCTL_HANDLER_ARGS);

static int	re_jpool_alloc(struct re_softc *);
static void	re_jpool_free(struct re_softc *);
#ifdef RE_JUMBO
static struct re_jbuf *re_jbuf_alloc(struct re_softc *);
static void	re_jbuf_free(void *);
static void	re_jbuf_ref(void *);
#endif

#ifdef IFPOLL_ENABLE
static void	re_npoll(struct ifnet *, struct ifpoll_info *);
static void	re_npoll_compat(struct ifnet *, void *, int);
#endif

static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),
	DEVMETHOD_END
};

static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct re_softc)
};

static devclass_t re_devclass;

DECLARE_DUMMY_MODULE(if_re);
DRIVER_MODULE(if_re, pci, re_driver, re_devclass, NULL, NULL);
DRIVER_MODULE(if_re, cardbus, re_driver, re_devclass, NULL, NULL);

/* Loader tunables: descriptor ring sizes and MSI usage. */
static int	re_rx_desc_count = RE_RX_DESC_CNT_DEF;
static int	re_tx_desc_count = RE_TX_DESC_CNT_DEF;
static int	re_msi_enable = 1;

TUNABLE_INT("hw.re.rx_desc_count", &re_rx_desc_count);
TUNABLE_INT("hw.re.tx_desc_count", &re_tx_desc_count);
TUNABLE_INT("hw.re.msi.enable", &re_msi_enable);

/* Free a partially assembled multi-fragment RX mbuf chain, if any. */
static __inline void
re_free_rxchain(struct re_softc *sc)
{
	if (sc->re_head != NULL) {
		m_freem(sc->re_head);
		sc->re_head = sc->re_tail = NULL;
	}
}

/*
 * Probe for a RealTek NIC: match the PCI vendor/device pair against
 * the re_devs table above.  Returns 0 on a match, ENXIO otherwise.
 */
static int
re_probe(device_t dev)
{
	const struct re_type *t;
	uint16_t vendor, product;

	vendor = pci_get_vendor(dev);
	product = pci_get_device(dev);

	/*
	 * Only attach to rev.3 of the Linksys EG1032 adapter.
	 * Rev.2 is supported by sk(4).
	 */
	if (vendor == PCI_VENDOR_LINKSYS &&
	    product == PCI_PRODUCT_LINKSYS_EG1032 &&
	    pci_get_subdevice(dev) != PCI_SUBDEVICE_LINKSYS_EG1032_REV3)
		return ENXIO;

	for (t = re_devs; t->re_name != NULL; t++) {
		if (product == t->re_did && vendor == t->re_vid)
			break;
	}
	if (t->re_name == NULL)
		return ENXIO;

	device_set_desc(dev, t->re_name);
	return 0;
}

/*
 * Allocate all DMA resources for the adapter: per-slot bookkeeping
 * arrays, the parent DMA tag, TX/RX descriptor rings, per-buffer DMA
 * maps and (when RE_C_CONTIGRX is set) the jumbo buffer pool.
 * Returns 0 on success or a bus_dma error code; on partial failure
 * already-created buffer maps are torn down via re_freebufmem().
 */
static int
re_allocmem(device_t dev)
{
	struct re_softc *sc = device_get_softc(dev);
	bus_dmamem_t dmem;
	int error, i;

	/*
	 * Allocate list data
	 */
	sc->re_ldata.re_tx_mbuf =
	    kmalloc(sc->re_tx_desc_cnt * sizeof(struct mbuf *),
		    M_DEVBUF, M_ZERO | M_WAITOK);

	sc->re_ldata.re_rx_mbuf =
	    kmalloc(sc->re_rx_desc_cnt * sizeof(struct mbuf *),
		    M_DEVBUF, M_ZERO | M_WAITOK);

	sc->re_ldata.re_rx_paddr =
	    kmalloc(sc->re_rx_desc_cnt * sizeof(bus_addr_t),
		    M_DEVBUF, M_ZERO | M_WAITOK);

	sc->re_ldata.re_tx_dmamap =
	    kmalloc(sc->re_tx_desc_cnt * sizeof(bus_dmamap_t),
		    M_DEVBUF, M_ZERO | M_WAITOK);

	sc->re_ldata.re_rx_dmamap =
	    kmalloc(sc->re_rx_desc_cnt * sizeof(bus_dmamap_t),
		    M_DEVBUF, M_ZERO | M_WAITOK);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
			0,			/* flags */
			&sc->re_parent_tag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return error;
	}

	/* Allocate TX descriptor list. */
	error = bus_dmamem_coherent(sc->re_parent_tag,
			RE_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			RE_TX_LIST_SZ(sc), BUS_DMA_WAITOK | BUS_DMA_ZERO,
			&dmem);
	if (error) {
		device_printf(dev, "could not allocate TX ring\n");
		return error;
	}
	sc->re_ldata.re_tx_list_tag = dmem.dmem_tag;
	sc->re_ldata.re_tx_list_map = dmem.dmem_map;
	sc->re_ldata.re_tx_list = dmem.dmem_addr;
	sc->re_ldata.re_tx_list_addr = dmem.dmem_busaddr;

	/* Allocate RX descriptor list. */
	error = bus_dmamem_coherent(sc->re_parent_tag,
			RE_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			RE_RX_LIST_SZ(sc), BUS_DMA_WAITOK | BUS_DMA_ZERO,
			&dmem);
	if (error) {
		device_printf(dev, "could not allocate RX ring\n");
		return error;
	}
	sc->re_ldata.re_rx_list_tag = dmem.dmem_tag;
	sc->re_ldata.re_rx_list_map = dmem.dmem_map;
	sc->re_ldata.re_rx_list = dmem.dmem_addr;
	sc->re_ldata.re_rx_list_addr = dmem.dmem_busaddr;

	/* Allocate maps for TX mbufs. */
	error = bus_dma_tag_create(sc->re_parent_tag,
			1, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			RE_FRAMELEN_MAX, RE_MAXSEGS, MCLBYTES,
			BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
			&sc->re_ldata.re_tx_mtag);
	if (error) {
		device_printf(dev, "could not allocate TX buf dma tag\n");
		return(error);
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < sc->re_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->re_ldata.re_tx_mtag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&sc->re_ldata.re_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX buf\n");
			re_freebufmem(sc, i, 0);
			return(error);
		}
	}

	/* Allocate maps for RX mbufs. */
	error = bus_dma_tag_create(sc->re_parent_tag,
			RE_RXBUF_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			MCLBYTES, 1, MCLBYTES,
			BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,
			&sc->re_ldata.re_rx_mtag);
	if (error) {
		device_printf(dev, "could not allocate RX buf dma tag\n");
		return(error);
	}

	/* Create spare DMA map for RX */
	error = bus_dmamap_create(sc->re_ldata.re_rx_mtag, BUS_DMA_WAITOK,
			&sc->re_ldata.re_rx_spare);
	if (error) {
		device_printf(dev, "can't create spare DMA map for RX\n");
		bus_dma_tag_destroy(sc->re_ldata.re_rx_mtag);
		sc->re_ldata.re_rx_mtag = NULL;
		return error;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < sc->re_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->re_ldata.re_rx_mtag,
				BUS_DMA_WAITOK, &sc->re_ldata.re_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX buf\n");
			re_freebufmem(sc, sc->re_tx_desc_cnt, i);
			return(error);
		}
	}

	/* Create jumbo buffer pool for RX if required */
	if (sc->re_caps & RE_C_CONTIGRX) {
		error = re_jpool_alloc(sc);
		if (error) {
			re_jpool_free(sc);
#ifdef RE_JUMBO
			/* Disable jumbo frame support */
			sc->re_maxmtu = ETHERMTU;
#endif
		}
	}
	return(0);
}

/*
 * Destroy the first tx_cnt TX and rx_cnt RX buffer DMA maps plus their
 * DMA tags.  Used both on full teardown and for partial-failure cleanup
 * from re_allocmem(), hence the explicit counts.
 */
static void
re_freebufmem(struct re_softc *sc, int tx_cnt, int rx_cnt)
{
	int i;

	/* Destroy all the RX and TX buffer maps */
	if (sc->re_ldata.re_tx_mtag) {
		for (i = 0; i < tx_cnt; i++) {
			bus_dmamap_destroy(sc->re_ldata.re_tx_mtag,
					   sc->re_ldata.re_tx_dmamap[i]);
		}
		bus_dma_tag_destroy(sc->re_ldata.re_tx_mtag);
		sc->re_ldata.re_tx_mtag = NULL;
	}

	if (sc->re_ldata.re_rx_mtag) {
		for (i = 0; i < rx_cnt; i++) {
			bus_dmamap_destroy(sc->re_ldata.re_rx_mtag,
					   sc->re_ldata.re_rx_dmamap[i]);
		}
		bus_dmamap_destroy(sc->re_ldata.re_rx_mtag,
				   sc->re_ldata.re_rx_spare);
		bus_dma_tag_destroy(sc->re_ldata.re_rx_mtag);
		sc->re_ldata.re_rx_mtag = NULL;
	}
}

/*
 * Release every DMA resource acquired by re_allocmem(): descriptor
 * rings, buffer maps/tags, the stats buffer, the jumbo pool and the
 * parent tag.  Tolerates partially-allocated state (each resource is
 * checked before it is freed), so it is safe from the attach failure
 * path as well as from detach.
 */
static void
re_freemem(device_t dev)
{
	struct re_softc *sc = device_get_softc(dev);

	/* Unload and free the RX DMA ring memory and map */
	if (sc->re_ldata.re_rx_list_tag) {
		bus_dmamap_unload(sc->re_ldata.re_rx_list_tag,
				  sc->re_ldata.re_rx_list_map);
		bus_dmamem_free(sc->re_ldata.re_rx_list_tag,
				sc->re_ldata.re_rx_list,
				sc->re_ldata.re_rx_list_map);
		bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */
	if (sc->re_ldata.re_tx_list_tag) {
		bus_dmamap_unload(sc->re_ldata.re_tx_list_tag,
				  sc->re_ldata.re_tx_list_map);
		bus_dmamem_free(sc->re_ldata.re_tx_list_tag,
				sc->re_ldata.re_tx_list,
				sc->re_ldata.re_tx_list_map);
		bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag);
	}

	/* Free RX/TX buf DMA stuffs */
	re_freebufmem(sc, sc->re_tx_desc_cnt, sc->re_rx_desc_cnt);

	/* Unload and free the stats buffer and map */
	if (sc->re_ldata.re_stag) {
		bus_dmamap_unload(sc->re_ldata.re_stag, sc->re_ldata.re_smap);
		bus_dmamem_free(sc->re_ldata.re_stag,
				sc->re_ldata.re_stats,
				sc->re_ldata.re_smap);
		bus_dma_tag_destroy(sc->re_ldata.re_stag);
	}

	if (sc->re_caps & RE_C_CONTIGRX)
		re_jpool_free(sc);

	if (sc->re_parent_tag)
		bus_dma_tag_destroy(sc->re_parent_tag);

	if (sc->re_ldata.re_tx_mbuf != NULL)
		kfree(sc->re_ldata.re_tx_mbuf, M_DEVBUF);
	if (sc->re_ldata.re_rx_mbuf != NULL)
		kfree(sc->re_ldata.re_rx_mbuf, M_DEVBUF);
	if (sc->re_ldata.re_rx_paddr != NULL)
		kfree(sc->re_ldata.re_rx_paddr, M_DEVBUF);
	if (sc->re_ldata.re_tx_dmamap != NULL)
		kfree(sc->re_ldata.re_tx_dmamap, M_DEVBUF);
	if (sc->re_ldata.re_rx_dmamap != NULL)
		kfree(sc->re_ldata.re_rx_dmamap, M_DEVBUF);
}

/*
 * Return TRUE when the chip is a 10/100 (fast ethernet) part.  The
 * gigabit/2.5G RealTek device IDs known to this driver are listed
 * explicitly; any other RealTek ID is assumed fast ethernet, and
 * non-RealTek vendors (all gigabit in re_devs) return FALSE.
 */
static boolean_t
re_is_faste(struct re_softc *sc)
{
	if (pci_get_vendor(sc->dev) == PCI_VENDOR_REALTEK) {
		switch (sc->re_device_id) {
		case PCI_PRODUCT_REALTEK_RT8169:
		case PCI_PRODUCT_REALTEK_RT8169SC:
		case PCI_PRODUCT_REALTEK_RT8168:
		case PCI_PRODUCT_REALTEK_RT8168_1:
		case PCI_PRODUCT_REALTEK_RT8125:
			return FALSE;
		default:
			return TRUE;
		}
	} else {
		return FALSE;
	}
}

/* Return true for 2.5Gbps-capable parts (currently only the RTL8125). */
static bool
re_is_2500e(const struct re_softc *sc)
{
	if (pci_get_vendor(sc->dev) == PCI_VENDOR_REALTEK) {
		switch (sc->re_device_id) {
		case PCI_PRODUCT_REALTEK_RT8125:
			return true;

		default:
			return false;
		}
	}
	return false;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
re_attach(device_t dev)
{
	struct re_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error = 0, qlen, msi_enable;
	u_int irq_flags;

	callout_init_mp(&sc->re_timer);
	sc->dev = dev;
	sc->re_device_id = pci_get_device(dev);
	sc->re_unit = device_get_unit(dev);
	ifmedia_init(&sc->media, IFM_IMASK, rtl_ifmedia_upd, rtl_ifmedia_sts);

	/* The RTL8125 uses different TX-kick/IMR/ISR register accessors. */
	if (pci_get_vendor(dev) == PCI_VENDOR_REALTEK &&
	    sc->re_device_id == PCI_PRODUCT_REALTEK_RT8125) {
		sc->re_start_xmit = re_start_xmit_8125;
		sc->re_write_imr = re_write_imr_8125;
		sc->re_write_isr = re_write_isr_8125;
		sc->re_read_isr = re_read_isr_8125;
	} else {
		sc->re_start_xmit = re_start_xmit;
		sc->re_write_imr = re_write_imr;
		sc->re_write_isr = re_write_isr;
		sc->re_read_isr = re_read_isr;
	}

	sc->re_caps = RE_C_HWIM;

	/* Clamp tunable ring sizes to the hardware maxima. */
	sc->re_rx_desc_cnt = re_rx_desc_count;
	if (sc->re_rx_desc_cnt > RE_RX_DESC_CNT_MAX)
		sc->re_rx_desc_cnt = RE_RX_DESC_CNT_MAX;

	sc->re_tx_desc_cnt = re_tx_desc_count;
	if (sc->re_tx_desc_cnt > RE_TX_DESC_CNT_MAX)
		sc->re_tx_desc_cnt = RE_TX_DESC_CNT_MAX;

	qlen = RE_IFQ_MAXLEN;
	if (sc->re_tx_desc_cnt > qlen)
		qlen = sc->re_tx_desc_cnt;

	sc->re_rxbuf_size = MCLBYTES;
	sc->re_newbuf = re_newbuf_std;

	/*
	 * Hardware interrupt moderation settings.
	 * XXX does not seem correct, undocumented.
	 */
	sc->re_tx_time = 5;		/* 125us */
	sc->re_rx_time = 2;		/* 50us */

	/* Simulated interrupt moderation setting. */
	sc->re_sim_time = 150;		/* 150us */

	/* Use simulated interrupt moderation by default. */
	sc->re_imtype = RE_IMTYPE_SIM;
	re_config_imtype(sc, sc->re_imtype);

	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "rx_desc_count", CTLFLAG_RD, &sc->re_rx_desc_cnt,
		       0, "RX desc count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "tx_desc_count", CTLFLAG_RD, &sc->re_tx_desc_cnt,
		       0, "TX desc count");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "sim_time",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, re_sysctl_simtime, "I",
			"Simulated interrupt moderation time (usec).");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "imtype",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, re_sysctl_imtype, "I",
			"Interrupt moderation type -- "
			"0:disable, 1:simulated, "
			"2:hardware(if supported)");
	if (sc->re_caps & RE_C_HWIM) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
				OID_AUTO, "hw_rxtime",
				CTLTYPE_INT | CTLFLAG_RW,
				sc, 0, re_sysctl_rxtime, "I",
				"Hardware interrupt moderation time "
				"(unit: 25usec).");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
				OID_AUTO, "hw_txtime",
				CTLTYPE_INT | CTLFLAG_RW,
				sc, 0, re_sysctl_txtime, "I",
				"Hardware interrupt moderation time "
				"(unit: 25usec).");
	}

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t membase, irq;

		/* Save important PCI config data. */
		membase = pci_read_config(dev, RE_PCI_LOMEM, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, RE_PCI_LOMEM, membase, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* PCIe parts are memory mapped via BAR2; PCI parts use I/O BAR0. */
	if (pci_is_pcie(dev)) {
		sc->re_res_rid = PCIR_BAR(2);
		sc->re_res_type = SYS_RES_MEMORY;
	} else {
		sc->re_res_rid = PCIR_BAR(0);
		sc->re_res_type = SYS_RES_IOPORT;
	}
	sc->re_res = bus_alloc_resource_any(dev, sc->re_res_type,
	    &sc->re_res_rid, RF_ACTIVE);
	if (sc->re_res == NULL) {
		device_printf(dev, "couldn't map IO\n");
		error = ENXIO;
		goto fail;
	}

	sc->re_btag = rman_get_bustag(sc->re_res);
	sc->re_bhandle = rman_get_bushandle(sc->re_res);

	error = rtl_check_mac_version(sc);
	if (error) {
		device_printf(dev, "check mac version failed\n");
		goto fail;
	}

	rtl_init_software_variable(sc);
	if (pci_is_pcie(dev))
		sc->re_if_flags |= RL_FLAG_PCIE;
	else
		sc->re_if_flags &= ~RL_FLAG_PCIE;
	device_printf(dev, "MAC version 0x%08x, MACFG %u%s%s%s\n",
	    (CSR_READ_4(sc, RE_TXCFG) & 0xFCF00000), sc->re_type,
	    sc->re_coalesce_tx_pkt ? ", software TX defrag" : "",
	    sc->re_pad_runt ? ", pad runt" : "",
	    sc->re_hw_enable_msi_msix ? ", support MSI" : "");

	/*
	 * Allocate interrupt
	 */
	if (pci_is_pcie(dev) && sc->re_hw_enable_msi_msix)
		msi_enable = re_msi_enable;
	else
		msi_enable = 0;
	sc->re_irq_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->re_irq_rid, &irq_flags);

	sc->re_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->re_irq_rid,
					    irq_flags);
	if (sc->re_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable ASPM */
	re_disable_aspm(dev);

	rtl_exit_oob(sc);
	rtl_hw_init(sc);

	/* Reset the adapter. */
	rtl_reset(sc);

	rtl_get_hw_mac_address(sc, eaddr);
	if (sc->re_type == MACFG_3)	/* Change PCI Latency time*/
		pci_write_config(dev, PCIR_LATTIMER, 0x40, 1);

	/* Allocate DMA stuffs */
	error = re_allocmem(dev);
	if (error)
		goto fail;

	/* Derive the bus speed; PCIe is fixed, PCI is read from CFG2. */
	if (pci_is_pcie(dev)) {
		sc->re_bus_speed = 125;
	} else {
		uint8_t cfg2;

		cfg2 = CSR_READ_1(sc, RE_CFG2);
		switch (cfg2 & RE_CFG2_PCICLK_MASK) {
		case RE_CFG2_PCICLK_33MHZ:
			sc->re_bus_speed = 33;
			break;
		case RE_CFG2_PCICLK_66MHZ:
			sc->re_bus_speed = 66;
			break;
		default:
			device_printf(dev, "unknown bus speed, assume 33MHz\n");
			sc->re_bus_speed = 33;
			break;
		}
	}
	device_printf(dev, "bus speed %dMHz\n", sc->re_bus_speed);

	/* Enable hardware checksum if available. */
	sc->re_tx_cstag = 1;
	sc->re_rx_cstag = 1;

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = re_npoll;
#endif
	ifp->if_watchdog = re_watchdog;
	ifp->if_init = re_init;
	if (re_is_faste(sc))
		ifp->if_baudrate = IF_Mbps(100ULL);
	else if (re_is_2500e(sc))
		ifp->if_baudrate = IF_Mbps(2500ULL);
	else
		ifp->if_baudrate = IF_Mbps(1000ULL);
	ifp->if_nmbclusters = sc->re_rx_desc_cnt;
	ifq_set_maxlen(&ifp->if_snd, qlen);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
			       IFCAP_RXCSUM | IFCAP_TXCSUM;
	ifp->if_capenable = ifp->if_capabilities;
	/* NOTE: if_hwassist will be setup after the interface is up. */

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->re_irq));

	rtl_phy_power_up(sc);
	rtl_hw_phy_config(sc);
	rtl_clrwol(sc);

	/* TODO: jumbo frame */
	CSR_WRITE_2(sc, RE_RxMaxSize, sc->re_rxbuf_size);

#ifdef IFPOLL_ENABLE
	ifpoll_compat_setup(&sc->re_npoll, ctx, (struct sysctl_oid *)tree,
	    device_get_unit(dev), ifp->if_serializer);
#endif

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->re_irq, INTR_MPSAFE | INTR_HIFREQ,
			       re_intr, sc, &sc->re_intrhand,
			       ifp->if_serializer);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	if (!re_is_faste(sc)) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
	}
	if (re_is_2500e(sc)) {
#ifndef IFM_2500_T
		ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_SX | IFM_FDX,
		    0, NULL);
#else
		ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T | IFM_FDX,
		    0, NULL);
#endif
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
	rtl_ifmedia_upd(ifp);

fail:
	if (error)
		re_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
re_detach(device_t dev)
{
	struct re_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		re_stop(sc, TRUE);
		bus_teardown_intr(dev, sc->re_irq, sc->re_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}
	ifmedia_removeall(&sc->media);

	if (sc->re_irq)
		bus_release_resource(dev, SYS_RES_IRQ, sc->re_irq_rid,
				     sc->re_irq);

	if (sc->re_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->re_res) {
		bus_release_resource(dev, sc->re_res_type, sc->re_res_rid,
				     sc->re_res);
	}
	rtl_cmac_unmap(sc);

	/* Free DMA stuffs */
	re_freemem(dev);

	return(0);
}

/*
 * Rewrite RX descriptor 'idx': point it at the buffer whose bus address
 * is already recorded in re_rx_paddr[idx] and hand ownership back to the
 * chip (OWN bit), setting EOR on the ring's last descriptor.
 */
static void
re_setup_rxdesc(struct re_softc *sc, int idx)
{
	bus_addr_t paddr;
	uint32_t cmdstat;
	struct re_desc *d;

	paddr = sc->re_ldata.re_rx_paddr[idx];
	d = &sc->re_ldata.re_rx_list[idx];

	d->re_bufaddr_lo = htole32(RE_ADDR_LO(paddr));
	d->re_bufaddr_hi = htole32(RE_ADDR_HI(paddr));

	cmdstat = sc->re_rxbuf_size | RE_RDESC_CMD_OWN;
	if (idx == (sc->re_rx_desc_cnt - 1))
		cmdstat |= RE_RDESC_CMD_EOR;
	d->re_cmdstat = htole32(cmdstat);
}

/*
 * Attach a fresh standard (MCLBYTES) mbuf cluster to RX slot 'idx'.
 * 'init' selects blocking allocation with error reporting during ring
 * initialization; in the non-init case, failures fall through to 'back'
 * so the old descriptor is simply handed back to the chip.
 */
static int
re_newbuf_std(struct re_softc *sc, int idx, int init)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error, nsegs;

	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(&sc->arpcom.ac_if, "m_getcl failed\n");
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * NOTE:
	 * re(4) chips need address of the receive buffer to be 8-byte
	 * aligned, so don't call m_adj(m, ETHER_ALIGN) here.
	 */

	error = bus_dmamap_load_mbuf_segment(sc->re_ldata.re_rx_mtag,
			sc->re_ldata.re_rx_spare, m,
			&seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		} else {
			goto back;
		}
	}

	if (!init) {
		bus_dmamap_sync(sc->re_ldata.re_rx_mtag,
				sc->re_ldata.re_rx_dmamap[idx],
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->re_ldata.re_rx_mtag,
				  sc->re_ldata.re_rx_dmamap[idx]);
	}
	sc->re_ldata.re_rx_mbuf[idx] = m;
	sc->re_ldata.re_rx_paddr[idx] = seg.ds_addr;

	/* Swap the just-loaded spare map with the slot's map. */
	map = sc->re_ldata.re_rx_dmamap[idx];
	sc->re_ldata.re_rx_dmamap[idx] = sc->re_ldata.re_rx_spare;
	sc->re_ldata.re_rx_spare = map;
back:
	re_setup_rxdesc(sc, idx);
	return error;
}

#ifdef RE_JUMBO
/*
 * Attach a jumbo buffer from the driver's private jpool to RX slot
 * 'idx'; same init/non-init failure semantics as re_newbuf_std().
 */
static int
re_newbuf_jumbo(struct re_softc *sc, int idx, int init)
{
	struct mbuf *m;
	struct re_jbuf *jbuf;
	int error = 0;

	MGETHDR(m, init ? M_WAITOK : M_NOWAIT, MT_DATA);
	if (m == NULL) {
		error = ENOBUFS;
		if (init) {
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
			return error;
		} else {
			goto back;
		}
	}

	jbuf = re_jbuf_alloc(sc);
	if (jbuf == NULL) {
		m_freem(m);

		error = ENOBUFS;
		if (init) {
			if_printf(&sc->arpcom.ac_if, "jpool is empty\n");
			return error;
		} else {
			goto back;
		}
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->re_buf;
	m->m_ext.ext_free = re_jbuf_free;
	m->m_ext.ext_ref = re_jbuf_ref;
	m->m_ext.ext_size = sc->re_rxbuf_size;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/*
	 * NOTE:
	 * Some re(4) chips(e.g. RTL8101E) need address of the receive buffer
	 * to be 8-byte aligned, so don't call m_adj(m, ETHER_ALIGN) here.
	 */

	sc->re_ldata.re_rx_mbuf[idx] = m;
	sc->re_ldata.re_rx_paddr[idx] = jbuf->re_paddr;
back:
	re_setup_rxdesc(sc, idx);
	return error;
}
#endif	/* RE_JUMBO */

/* Reset the TX ring: zero all descriptors and reset the ring indices. */
static int
re_tx_list_init(struct re_softc *sc)
{
	bzero(sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));

	sc->re_ldata.re_tx_prodidx = 0;
	sc->re_ldata.re_tx_considx = 0;
	sc->re_ldata.re_tx_free = sc->re_tx_desc_cnt;

	return(0);
}

/* Populate every RX slot with a fresh buffer and reset RX ring state. */
static int
re_rx_list_init(struct re_softc *sc)
{
	int i, error;

	bzero(sc->re_ldata.re_rx_list, RE_RX_LIST_SZ(sc));

	for (i = 0; i < sc->re_rx_desc_cnt; i++) {
		error = sc->re_newbuf(sc, i, 1);
		if (error)
			return(error);
	}

	sc->re_ldata.re_rx_prodidx = 0;
	sc->re_head = sc->re_tail = NULL;

	return(0);
}

#define RE_IP4_PACKET	0x1
#define RE_TCP_PACKET	0x2
#define RE_UDP_PACKET	0x4

/*
 * Classify a received frame from its descriptor status/control words.
 * Descriptor-format-V2 chips report IPv4 in the control word; older
 * chips report it in the status word.
 */
static __inline uint8_t
re_packet_type(struct re_softc *sc, uint32_t rxstat, uint32_t rxctrl)
{
	uint8_t packet_type = 0;

	if (sc->re_if_flags & RL_FLAG_DESCV2) {
		if (rxctrl & RE_RDESC_CTL_PROTOIP4)
			packet_type |= RE_IP4_PACKET;
	} else {
		if (rxstat & RE_RDESC_STAT_PROTOID)
			packet_type |= RE_IP4_PACKET;
	}
	if (RE_TCPPKT(rxstat))
		packet_type |= RE_TCP_PACKET;
	else if (RE_UDPPKT(rxstat))
		packet_type |= RE_UDP_PACKET;
	return packet_type;
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
static int
re_rxeof(struct re_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	struct re_desc *cur_rx;
	uint32_t rxstat, rxctrl;
	int i, total_len, rx = 0;

	/* Walk the ring until we hit a descriptor the chip still owns. */
	for (i = sc->re_ldata.re_rx_prodidx;
	     RE_OWN(&sc->re_ldata.re_rx_list[i]) == 0; RE_RXDESC_INC(sc, i)) {
		cur_rx = &sc->re_ldata.re_rx_list[i];
		m = sc->re_ldata.re_rx_mbuf[i];
		total_len = RE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->re_cmdstat);
		rxctrl = le32toh(cur_rx->re_control);

		rx = 1;

#ifdef INVARIANTS
		if (sc->re_flags & RE_F_USE_JPOOL)
			KKASSERT(rxstat & RE_RDESC_STAT_EOF);
#endif

		if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
			/* Middle fragment of a multi-fragment packet. */
			if (sc->re_flags & RE_F_DROP_RXFRAG) {
				re_setup_rxdesc(sc, i);
				continue;
			}

			if (sc->re_newbuf(sc, i, 0)) {
				/* Drop upcoming fragments */
				sc->re_flags |= RE_F_DROP_RXFRAG;
				continue;
			}

			m->m_len = MCLBYTES;
			if (sc->re_head == NULL) {
				sc->re_head = sc->re_tail = m;
			} else {
				sc->re_tail->m_next = m;
				sc->re_tail = m;
			}
			continue;
		} else if (sc->re_flags & RE_F_DROP_RXFRAG) {
			/*
			 * Last fragment of a multi-fragment packet.
1172 * 1173 * Since error already happened, this fragment 1174 * must be dropped as well as the fragment chain. 1175 */ 1176 re_setup_rxdesc(sc, i); 1177 re_free_rxchain(sc); 1178 sc->re_flags &= ~RE_F_DROP_RXFRAG; 1179 continue; 1180 } 1181 1182 rxstat >>= 1; 1183 if (rxstat & RE_RDESC_STAT_RXERRSUM) { 1184 IFNET_STAT_INC(ifp, ierrors, 1); 1185 /* 1186 * If this is part of a multi-fragment packet, 1187 * discard all the pieces. 1188 */ 1189 re_free_rxchain(sc); 1190 re_setup_rxdesc(sc, i); 1191 continue; 1192 } 1193 1194 /* 1195 * If allocating a replacement mbuf fails, 1196 * reload the current one. 1197 */ 1198 1199 if (sc->re_newbuf(sc, i, 0)) { 1200 IFNET_STAT_INC(ifp, ierrors, 1); 1201 continue; 1202 } 1203 1204 if (sc->re_head != NULL) { 1205 m->m_len = total_len % MCLBYTES; 1206 /* 1207 * Special case: if there's 4 bytes or less 1208 * in this buffer, the mbuf can be discarded: 1209 * the last 4 bytes is the CRC, which we don't 1210 * care about anyway. 1211 */ 1212 if (m->m_len <= ETHER_CRC_LEN) { 1213 sc->re_tail->m_len -= 1214 (ETHER_CRC_LEN - m->m_len); 1215 m_freem(m); 1216 } else { 1217 m->m_len -= ETHER_CRC_LEN; 1218 sc->re_tail->m_next = m; 1219 } 1220 m = sc->re_head; 1221 sc->re_head = sc->re_tail = NULL; 1222 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1223 } else { 1224 m->m_pkthdr.len = m->m_len = 1225 (total_len - ETHER_CRC_LEN); 1226 } 1227 1228 IFNET_STAT_INC(ifp, ipackets, 1); 1229 m->m_pkthdr.rcvif = ifp; 1230 1231 /* Do RX checksumming if enabled */ 1232 1233 if (ifp->if_capenable & IFCAP_RXCSUM) { 1234 uint8_t packet_type; 1235 1236 packet_type = re_packet_type(sc, rxstat, rxctrl); 1237 1238 /* Check IP header checksum */ 1239 if (packet_type & RE_IP4_PACKET) { 1240 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1241 if ((rxstat & RE_RDESC_STAT_IPSUMBAD) == 0) 1242 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1243 } 1244 1245 /* Check TCP/UDP checksum */ 1246 if (((packet_type & RE_TCP_PACKET) && 1247 (rxstat & RE_RDESC_STAT_TCPSUMBAD) == 0) || 
1248 ((packet_type & RE_UDP_PACKET) && 1249 (rxstat & RE_RDESC_STAT_UDPSUMBAD) == 0)) { 1250 m->m_pkthdr.csum_flags |= 1251 CSUM_DATA_VALID|CSUM_PSEUDO_HDR| 1252 CSUM_FRAG_NOT_CHECKED; 1253 m->m_pkthdr.csum_data = 0xffff; 1254 } 1255 } 1256 1257 if (rxctrl & RE_RDESC_CTL_HASTAG) { 1258 m->m_flags |= M_VLANTAG; 1259 m->m_pkthdr.ether_vlantag = 1260 be16toh((rxctrl & RE_RDESC_CTL_TAGDATA)); 1261 } 1262 ifp->if_input(ifp, m, NULL, -1); 1263 } 1264 1265 sc->re_ldata.re_rx_prodidx = i; 1266 1267 return rx; 1268 } 1269 1270 #undef RE_IP4_PACKET 1271 #undef RE_TCP_PACKET 1272 #undef RE_UDP_PACKET 1273 1274 static int 1275 re_tx_collect(struct re_softc *sc) 1276 { 1277 struct ifnet *ifp = &sc->arpcom.ac_if; 1278 uint32_t txstat; 1279 int idx, tx = 0; 1280 1281 for (idx = sc->re_ldata.re_tx_considx; 1282 sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt; 1283 RE_TXDESC_INC(sc, idx)) { 1284 txstat = le32toh(sc->re_ldata.re_tx_list[idx].re_cmdstat); 1285 if (txstat & RE_TDESC_CMD_OWN) 1286 break; 1287 1288 tx = 1; 1289 1290 sc->re_ldata.re_tx_list[idx].re_bufaddr_lo = 0; 1291 1292 /* 1293 * We only stash mbufs in the last descriptor 1294 * in a fragment chain, which also happens to 1295 * be the only place where the TX status bits 1296 * are valid. 1297 * 1298 * NOTE: 1299 * On 8125, RE_TDESC_CMD_EOF is no longer left 1300 * uncleared. 
1301 */ 1302 if (sc->re_ldata.re_tx_mbuf[idx] != NULL) { 1303 bus_dmamap_unload(sc->re_ldata.re_tx_mtag, 1304 sc->re_ldata.re_tx_dmamap[idx]); 1305 m_freem(sc->re_ldata.re_tx_mbuf[idx]); 1306 sc->re_ldata.re_tx_mbuf[idx] = NULL; 1307 if (txstat & (RE_TDESC_STAT_EXCESSCOL| 1308 RE_TDESC_STAT_COLCNT)) 1309 IFNET_STAT_INC(ifp, collisions, 1); 1310 if (txstat & RE_TDESC_STAT_TXERRSUM) 1311 IFNET_STAT_INC(ifp, oerrors, 1); 1312 else 1313 IFNET_STAT_INC(ifp, opackets, 1); 1314 } 1315 sc->re_ldata.re_tx_free++; 1316 } 1317 sc->re_ldata.re_tx_considx = idx; 1318 1319 return tx; 1320 } 1321 1322 static int 1323 re_txeof(struct re_softc *sc) 1324 { 1325 struct ifnet *ifp = &sc->arpcom.ac_if; 1326 int tx; 1327 1328 tx = re_tx_collect(sc); 1329 1330 /* There is enough free TX descs */ 1331 if (sc->re_ldata.re_tx_free > RE_TXDESC_SPARE) 1332 ifq_clr_oactive(&ifp->if_snd); 1333 1334 /* 1335 * Some chips will ignore a second TX request issued while an 1336 * existing transmission is in progress. If the transmitter goes 1337 * idle but there are still packets waiting to be sent, we need 1338 * to restart the channel here to flush them out. This only seems 1339 * to be required with the PCIe devices. 
1340 */ 1341 if (sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt) 1342 sc->re_start_xmit(sc); 1343 else 1344 ifp->if_timer = 0; 1345 1346 return tx; 1347 } 1348 1349 static void 1350 re_tick(void *xsc) 1351 { 1352 struct re_softc *sc = xsc; 1353 1354 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer); 1355 re_tick_serialized(xsc); 1356 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer); 1357 } 1358 1359 static void 1360 re_tick_serialized(void *xsc) 1361 { 1362 struct re_softc *sc = xsc; 1363 struct ifnet *ifp = &sc->arpcom.ac_if; 1364 1365 ASSERT_SERIALIZED(ifp->if_serializer); 1366 1367 if ((ifp->if_flags & IFF_RUNNING) == 0) 1368 return; 1369 1370 if (rtl_link_ok(sc)) { 1371 if ((sc->re_flags & RE_F_LINKED) == 0) 1372 re_link_up(sc); 1373 } else if (sc->re_flags & RE_F_LINKED) { 1374 re_link_down(sc); 1375 } 1376 callout_reset(&sc->re_timer, hz, re_tick, sc); 1377 } 1378 1379 #ifdef IFPOLL_ENABLE 1380 1381 static void 1382 re_npoll_compat(struct ifnet *ifp, void *arg __unused, int count) 1383 { 1384 struct re_softc *sc = ifp->if_softc; 1385 1386 ASSERT_SERIALIZED(ifp->if_serializer); 1387 1388 if (sc->re_npoll.ifpc_stcount-- == 0) { 1389 uint32_t status; 1390 1391 sc->re_npoll.ifpc_stcount = sc->re_npoll.ifpc_stfrac; 1392 1393 status = sc->re_read_isr(sc); 1394 if (status) 1395 sc->re_write_isr(sc, status); 1396 1397 /* 1398 * XXX check behaviour on receiver stalls. 1399 */ 1400 1401 if (status & RE_ISR_SYSTEM_ERR) { 1402 rtl_reset(sc); 1403 re_init(sc); 1404 /* Done! 
*/ 1405 return; 1406 } 1407 } 1408 1409 sc->rxcycles = count; 1410 re_rxeof(sc); 1411 re_txeof(sc); 1412 1413 if (!ifq_is_empty(&ifp->if_snd)) 1414 if_devstart(ifp); 1415 } 1416 1417 static void 1418 re_npoll(struct ifnet *ifp, struct ifpoll_info *info) 1419 { 1420 struct re_softc *sc = ifp->if_softc; 1421 1422 ASSERT_SERIALIZED(ifp->if_serializer); 1423 1424 if (info != NULL) { 1425 int cpuid = sc->re_npoll.ifpc_cpuid; 1426 1427 info->ifpi_rx[cpuid].poll_func = re_npoll_compat; 1428 info->ifpi_rx[cpuid].arg = NULL; 1429 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 1430 1431 if (ifp->if_flags & IFF_RUNNING) 1432 re_setup_intr(sc, 0, RE_IMTYPE_NONE); 1433 ifq_set_cpuid(&ifp->if_snd, cpuid); 1434 } else { 1435 if (ifp->if_flags & IFF_RUNNING) 1436 re_setup_intr(sc, 1, sc->re_imtype); 1437 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->re_irq)); 1438 } 1439 } 1440 #endif /* IFPOLL_ENABLE */ 1441 1442 static void 1443 re_intr(void *arg) 1444 { 1445 struct re_softc *sc = arg; 1446 struct ifnet *ifp = &sc->arpcom.ac_if; 1447 uint32_t status; 1448 int proc; 1449 1450 ASSERT_SERIALIZED(ifp->if_serializer); 1451 1452 if ((sc->re_flags & RE_F_SUSPENDED) || 1453 (ifp->if_flags & IFF_RUNNING) == 0) 1454 return; 1455 1456 /* Disable interrupts. */ 1457 sc->re_write_imr(sc, 0); 1458 1459 status = sc->re_read_isr(sc); 1460 again: 1461 proc = 0; 1462 if (status) 1463 sc->re_write_isr(sc, status); 1464 if (status & sc->re_intrs) { 1465 if (status & RE_ISR_SYSTEM_ERR) { 1466 rtl_reset(sc); 1467 re_init(sc); 1468 /* Done! */ 1469 return; 1470 } 1471 proc |= re_rxeof(sc); 1472 proc |= re_txeof(sc); 1473 } 1474 1475 if (sc->re_imtype == RE_IMTYPE_SIM) { 1476 if ((sc->re_flags & RE_F_TIMER_INTR)) { 1477 if (!proc) { 1478 /* 1479 * Nothing needs to be processed, fallback 1480 * to use TX/RX interrupts. 1481 * 1482 * NOTE: This will re-enable interrupts. 
1483 */ 1484 re_setup_intr(sc, 1, RE_IMTYPE_NONE); 1485 1486 /* 1487 * Recollect, mainly to avoid the possible 1488 * race introduced by changing interrupt 1489 * masks. 1490 */ 1491 re_rxeof(sc); 1492 re_txeof(sc); 1493 } else { 1494 /* Re-enable interrupts. */ 1495 sc->re_write_imr(sc, sc->re_intrs); 1496 CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */ 1497 } 1498 } else if (proc) { 1499 /* 1500 * Assume that using simulated interrupt moderation 1501 * (hardware timer based) could reduce the interript 1502 * rate. 1503 * 1504 * NOTE: This will re-enable interrupts. 1505 */ 1506 re_setup_intr(sc, 1, RE_IMTYPE_SIM); 1507 } else { 1508 /* Re-enable interrupts. */ 1509 sc->re_write_imr(sc, sc->re_intrs); 1510 } 1511 } else { 1512 status = sc->re_read_isr(sc); 1513 if (status & sc->re_intrs) { 1514 if (!ifq_is_empty(&ifp->if_snd)) 1515 if_devstart(ifp); 1516 /* NOTE: Interrupts are still disabled. */ 1517 goto again; 1518 } 1519 /* Re-enable interrupts. */ 1520 sc->re_write_imr(sc, sc->re_intrs); 1521 } 1522 1523 if (!ifq_is_empty(&ifp->if_snd)) 1524 if_devstart(ifp); 1525 } 1526 1527 static int 1528 re_encap(struct re_softc *sc, struct mbuf **m_head, int *idx0) 1529 { 1530 struct mbuf *m = *m_head; 1531 bus_dma_segment_t segs[RE_MAXSEGS]; 1532 bus_dmamap_t map; 1533 int error, maxsegs, idx, i, nsegs; 1534 struct re_desc *d, *tx_ring; 1535 uint32_t cmd_csum, ctl_csum, vlantag; 1536 1537 KASSERT(sc->re_ldata.re_tx_free > RE_TXDESC_SPARE, 1538 ("not enough free TX desc")); 1539 1540 if (sc->re_coalesce_tx_pkt && m->m_pkthdr.len != m->m_len) { 1541 struct mbuf *m_new; 1542 1543 m_new = m_defrag(m, M_NOWAIT); 1544 if (m_new == NULL) { 1545 error = ENOBUFS; 1546 goto back; 1547 } else { 1548 *m_head = m = m_new; 1549 if (m->m_pkthdr.len != m->m_len) { 1550 /* Still not configuous; give up. */ 1551 error = ENOBUFS; 1552 goto back; 1553 } 1554 } 1555 } 1556 1557 map = sc->re_ldata.re_tx_dmamap[*idx0]; 1558 1559 /* 1560 * Set up checksum offload. 
Note: checksum offload bits must 1561 * appear in all descriptors of a multi-descriptor transmit 1562 * attempt. (This is according to testing done with an 8169 1563 * chip. I'm not sure if this is a requirement or a bug.) 1564 */ 1565 cmd_csum = ctl_csum = 0; 1566 if (m->m_pkthdr.csum_flags & CSUM_IP) { 1567 cmd_csum |= RE_TDESC_CMD_IPCSUM; 1568 ctl_csum |= RE_TDESC_CTL_IPCSUM; 1569 } 1570 if (m->m_pkthdr.csum_flags & CSUM_TCP) { 1571 cmd_csum |= RE_TDESC_CMD_TCPCSUM; 1572 ctl_csum |= RE_TDESC_CTL_TCPCSUM; 1573 } 1574 if (m->m_pkthdr.csum_flags & CSUM_UDP) { 1575 cmd_csum |= RE_TDESC_CMD_UDPCSUM; 1576 ctl_csum |= RE_TDESC_CTL_UDPCSUM; 1577 } 1578 1579 /* For version2 descriptor, csum flags are set on re_control */ 1580 if (sc->re_if_flags & RL_FLAG_DESCV2) 1581 cmd_csum = 0; 1582 else 1583 ctl_csum = 0; 1584 1585 if (sc->re_pad_runt) { 1586 /* 1587 * With some of the RealTek chips, using the checksum offload 1588 * support in conjunction with the autopadding feature results 1589 * in the transmission of corrupt frames. For example, if we 1590 * need to send a really small IP fragment that's less than 60 1591 * bytes in size, and IP header checksumming is enabled, the 1592 * resulting ethernet frame that appears on the wire will 1593 * have garbled payload. To work around this, if TX checksum 1594 * offload is enabled, we always manually pad short frames out 1595 * to the minimum ethernet frame size. 1596 * 1597 * Note: this appears unnecessary for TCP, and doing it for TCP 1598 * with PCIe adapters seems to result in bad checksums. 
1599 */ 1600 if ((m->m_pkthdr.csum_flags & 1601 (CSUM_DELAY_IP | CSUM_DELAY_DATA)) && 1602 (m->m_pkthdr.csum_flags & CSUM_TCP) == 0 && 1603 m->m_pkthdr.len < RE_MIN_FRAMELEN) { 1604 error = m_devpad(m, RE_MIN_FRAMELEN); 1605 if (error) 1606 goto back; 1607 } 1608 } 1609 1610 vlantag = 0; 1611 if (m->m_flags & M_VLANTAG) { 1612 vlantag = htobe16(m->m_pkthdr.ether_vlantag) | 1613 RE_TDESC_CTL_INSTAG; 1614 } 1615 1616 maxsegs = sc->re_ldata.re_tx_free; 1617 if (maxsegs > RE_MAXSEGS) 1618 maxsegs = RE_MAXSEGS; 1619 1620 error = bus_dmamap_load_mbuf_defrag(sc->re_ldata.re_tx_mtag, map, 1621 m_head, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1622 if (error) 1623 goto back; 1624 1625 m = *m_head; 1626 bus_dmamap_sync(sc->re_ldata.re_tx_mtag, map, BUS_DMASYNC_PREWRITE); 1627 1628 /* 1629 * Map the segment array into descriptors. We also keep track 1630 * of the end of the ring and set the end-of-ring bits as needed, 1631 * and we set the ownership bits in all except the very first 1632 * descriptor, whose ownership bits will be turned on later. 1633 */ 1634 tx_ring = sc->re_ldata.re_tx_list; 1635 idx = *idx0; 1636 i = 0; 1637 for (;;) { 1638 uint32_t cmdstat; 1639 1640 d = &tx_ring[idx]; 1641 1642 KKASSERT(sc->re_ldata.re_tx_mbuf[idx] == NULL); 1643 1644 d->re_bufaddr_lo = htole32(RE_ADDR_LO(segs[i].ds_addr)); 1645 d->re_bufaddr_hi = htole32(RE_ADDR_HI(segs[i].ds_addr)); 1646 1647 cmdstat = segs[i].ds_len; 1648 if (i == 0) { 1649 cmdstat |= RE_TDESC_CMD_SOF; 1650 } else if (i != nsegs - 1) { 1651 /* 1652 * Last descriptor's ownership will be transfered 1653 * later. 1654 */ 1655 cmdstat |= RE_TDESC_CMD_OWN; 1656 } 1657 if (idx == (sc->re_tx_desc_cnt - 1)) 1658 cmdstat |= RE_TDESC_CMD_EOR; 1659 1660 d->re_control = htole32(ctl_csum | vlantag); 1661 d->re_cmdstat = htole32(cmdstat | cmd_csum); 1662 1663 i++; 1664 if (i == nsegs) 1665 break; 1666 RE_TXDESC_INC(sc, idx); 1667 } 1668 d->re_cmdstat |= htole32(RE_TDESC_CMD_EOF); 1669 1670 /* Transfer ownership of packet to the chip. 
*/ 1671 d->re_cmdstat |= htole32(RE_TDESC_CMD_OWN); 1672 if (*idx0 != idx) 1673 tx_ring[*idx0].re_cmdstat |= htole32(RE_TDESC_CMD_OWN); 1674 1675 /* 1676 * Insure that the map for this transmission 1677 * is placed at the array index of the last descriptor 1678 * in this chain. 1679 */ 1680 sc->re_ldata.re_tx_dmamap[*idx0] = sc->re_ldata.re_tx_dmamap[idx]; 1681 sc->re_ldata.re_tx_dmamap[idx] = map; 1682 1683 sc->re_ldata.re_tx_mbuf[idx] = m; 1684 sc->re_ldata.re_tx_free -= nsegs; 1685 1686 RE_TXDESC_INC(sc, idx); 1687 *idx0 = idx; 1688 back: 1689 if (error) { 1690 m_freem(*m_head); 1691 *m_head = NULL; 1692 } 1693 return error; 1694 } 1695 1696 /* 1697 * Main transmit routine for C+ and gigE NICs. 1698 */ 1699 1700 static void 1701 re_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1702 { 1703 struct re_softc *sc = ifp->if_softc; 1704 struct mbuf *m_head; 1705 int idx, need_trans, oactive, error; 1706 1707 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1708 ASSERT_SERIALIZED(ifp->if_serializer); 1709 1710 if ((sc->re_flags & RE_F_LINKED) == 0) { 1711 ifq_purge(&ifp->if_snd); 1712 return; 1713 } 1714 1715 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 1716 return; 1717 1718 idx = sc->re_ldata.re_tx_prodidx; 1719 1720 need_trans = 0; 1721 oactive = 0; 1722 for (;;) { 1723 if (sc->re_ldata.re_tx_free <= RE_TXDESC_SPARE) { 1724 if (!oactive) { 1725 if (re_tx_collect(sc)) { 1726 oactive = 1; 1727 continue; 1728 } 1729 } 1730 ifq_set_oactive(&ifp->if_snd); 1731 break; 1732 } 1733 1734 m_head = ifq_dequeue(&ifp->if_snd); 1735 if (m_head == NULL) 1736 break; 1737 1738 error = re_encap(sc, &m_head, &idx); 1739 if (error) { 1740 /* m_head is freed by re_encap(), if we reach here */ 1741 IFNET_STAT_INC(ifp, oerrors, 1); 1742 1743 if (error == EFBIG && !oactive) { 1744 if (re_tx_collect(sc)) { 1745 oactive = 1; 1746 continue; 1747 } 1748 } 1749 ifq_set_oactive(&ifp->if_snd); 1750 break; 1751 } 1752 1753 oactive = 0; 1754 need_trans = 1; 1755 1756 /* 1757 * If 
there's a BPF listener, bounce a copy of this frame 1758 * to him. 1759 */ 1760 ETHER_BPF_MTAP(ifp, m_head); 1761 } 1762 1763 if (!need_trans) 1764 return; 1765 1766 sc->re_ldata.re_tx_prodidx = idx; 1767 1768 /* 1769 * RealTek put the TX poll request register in a different 1770 * location on the 8169 gigE chip. I don't know why. 1771 */ 1772 sc->re_start_xmit(sc); 1773 1774 /* 1775 * Set a timeout in case the chip goes out to lunch. 1776 */ 1777 ifp->if_timer = 5; 1778 } 1779 1780 static void 1781 re_link_up(struct re_softc *sc) 1782 { 1783 struct ifnet *ifp = &sc->arpcom.ac_if; 1784 int error; 1785 1786 ASSERT_SERIALIZED(ifp->if_serializer); 1787 1788 rtl_link_on_patch(sc); 1789 re_stop(sc, FALSE); 1790 rtl_set_eaddr(sc); 1791 1792 error = re_rx_list_init(sc); 1793 if (error) { 1794 re_stop(sc, TRUE); 1795 return; 1796 } 1797 error = re_tx_list_init(sc); 1798 if (error) { 1799 re_stop(sc, TRUE); 1800 return; 1801 } 1802 1803 /* 1804 * Load the addresses of the RX and TX lists into the chip. 1805 */ 1806 CSR_WRITE_4(sc, RE_RXLIST_ADDR_HI, 1807 RE_ADDR_HI(sc->re_ldata.re_rx_list_addr)); 1808 CSR_WRITE_4(sc, RE_RXLIST_ADDR_LO, 1809 RE_ADDR_LO(sc->re_ldata.re_rx_list_addr)); 1810 1811 CSR_WRITE_4(sc, RE_TXLIST_ADDR_HI, 1812 RE_ADDR_HI(sc->re_ldata.re_tx_list_addr)); 1813 CSR_WRITE_4(sc, RE_TXLIST_ADDR_LO, 1814 RE_ADDR_LO(sc->re_ldata.re_tx_list_addr)); 1815 1816 rtl_hw_start(sc); 1817 1818 #ifdef IFPOLL_ENABLE 1819 /* 1820 * Disable interrupts if we are polling. 1821 */ 1822 if (ifp->if_flags & IFF_NPOLLING) 1823 re_setup_intr(sc, 0, RE_IMTYPE_NONE); 1824 else /* otherwise ... */ 1825 #endif /* IFPOLL_ENABLE */ 1826 /* 1827 * Enable interrupts. 
1828 */ 1829 re_setup_intr(sc, 1, sc->re_imtype); 1830 sc->re_write_isr(sc, sc->re_intrs); 1831 1832 sc->re_flags |= RE_F_LINKED; 1833 ifp->if_link_state = LINK_STATE_UP; 1834 if_link_state_change(ifp); 1835 1836 if (bootverbose) 1837 if_printf(ifp, "link UP\n"); 1838 1839 if (!ifq_is_empty(&ifp->if_snd)) 1840 if_devstart(ifp); 1841 } 1842 1843 static void 1844 re_link_down(struct re_softc *sc) 1845 { 1846 struct ifnet *ifp = &sc->arpcom.ac_if; 1847 1848 /* NOTE: re_stop() will reset RE_F_LINKED. */ 1849 ifp->if_link_state = LINK_STATE_DOWN; 1850 if_link_state_change(ifp); 1851 1852 re_stop(sc, FALSE); 1853 rtl_ifmedia_upd(ifp); 1854 1855 if (bootverbose) 1856 if_printf(ifp, "link DOWN\n"); 1857 } 1858 1859 static void 1860 re_init(void *xsc) 1861 { 1862 struct re_softc *sc = xsc; 1863 struct ifnet *ifp = &sc->arpcom.ac_if; 1864 1865 ASSERT_SERIALIZED(ifp->if_serializer); 1866 1867 re_stop(sc, TRUE); 1868 if (rtl_link_ok(sc)) { 1869 if (bootverbose) 1870 if_printf(ifp, "link is UP in if_init\n"); 1871 re_link_up(sc); 1872 } 1873 1874 ifp->if_flags |= IFF_RUNNING; 1875 ifq_clr_oactive(&ifp->if_snd); 1876 1877 callout_reset(&sc->re_timer, hz, re_tick, sc); 1878 } 1879 1880 static int 1881 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1882 { 1883 struct re_softc *sc = ifp->if_softc; 1884 struct ifreq *ifr = (struct ifreq *)data; 1885 int error = 0, mask; 1886 1887 ASSERT_SERIALIZED(ifp->if_serializer); 1888 1889 switch(command) { 1890 case SIOCSIFMTU: 1891 #ifdef RE_JUMBO 1892 if (ifr->ifr_mtu > sc->re_maxmtu) { 1893 error = EINVAL; 1894 } else if (ifp->if_mtu != ifr->ifr_mtu) { 1895 ifp->if_mtu = ifr->ifr_mtu; 1896 if (ifp->if_flags & IFF_RUNNING) 1897 ifp->if_init(sc); 1898 } 1899 #else 1900 error = EOPNOTSUPP; 1901 #endif 1902 break; 1903 1904 case SIOCSIFFLAGS: 1905 if (ifp->if_flags & IFF_UP) { 1906 if (ifp->if_flags & IFF_RUNNING) { 1907 if ((ifp->if_flags ^ sc->re_saved_ifflags) & 1908 (IFF_PROMISC | IFF_ALLMULTI)) 1909 
rtl_set_rx_packet_filter(sc); 1910 } else { 1911 re_init(sc); 1912 } 1913 } else if (ifp->if_flags & IFF_RUNNING) { 1914 re_stop(sc, TRUE); 1915 } 1916 sc->re_saved_ifflags = ifp->if_flags; 1917 break; 1918 1919 case SIOCADDMULTI: 1920 case SIOCDELMULTI: 1921 rtl_set_rx_packet_filter(sc); 1922 break; 1923 1924 case SIOCGIFMEDIA: 1925 case SIOCGIFXMEDIA: 1926 case SIOCSIFMEDIA: 1927 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 1928 break; 1929 1930 case SIOCSIFCAP: 1931 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & 1932 ifp->if_capabilities; 1933 ifp->if_capenable ^= mask; 1934 1935 /* NOTE: re_init will setup if_hwassist. */ 1936 ifp->if_hwassist = 0; 1937 1938 /* Setup flags for the backend. */ 1939 if (ifp->if_capenable & IFCAP_RXCSUM) 1940 sc->re_rx_cstag = 1; 1941 else 1942 sc->re_rx_cstag = 0; 1943 if (ifp->if_capenable & IFCAP_TXCSUM) 1944 sc->re_tx_cstag = 1; 1945 else 1946 sc->re_tx_cstag = 0; 1947 1948 if (mask && (ifp->if_flags & IFF_RUNNING)) 1949 re_init(sc); 1950 break; 1951 1952 default: 1953 error = ether_ioctl(ifp, command, data); 1954 break; 1955 } 1956 return(error); 1957 } 1958 1959 static void 1960 re_watchdog(struct ifnet *ifp) 1961 { 1962 struct re_softc *sc = ifp->if_softc; 1963 1964 ASSERT_SERIALIZED(ifp->if_serializer); 1965 1966 IFNET_STAT_INC(ifp, oerrors, 1); 1967 1968 re_txeof(sc); 1969 re_rxeof(sc); 1970 1971 if (sc->re_ldata.re_tx_free != sc->re_tx_desc_cnt) { 1972 if_printf(ifp, "watchdog timeout, txd free %d\n", 1973 sc->re_ldata.re_tx_free); 1974 rtl_reset(sc); 1975 re_init(sc); 1976 } 1977 } 1978 1979 /* 1980 * Stop the adapter and free any mbufs allocated to the 1981 * RX and TX lists. 1982 */ 1983 static void 1984 re_stop(struct re_softc *sc, boolean_t full_stop) 1985 { 1986 struct ifnet *ifp = &sc->arpcom.ac_if; 1987 int i; 1988 1989 ASSERT_SERIALIZED(ifp->if_serializer); 1990 1991 /* Stop the adapter. 
*/ 1992 rtl_stop(sc); 1993 1994 ifp->if_timer = 0; 1995 if (full_stop) { 1996 callout_stop(&sc->re_timer); 1997 ifp->if_flags &= ~IFF_RUNNING; 1998 } 1999 ifq_clr_oactive(&ifp->if_snd); 2000 sc->re_flags &= ~(RE_F_TIMER_INTR | RE_F_DROP_RXFRAG | RE_F_LINKED); 2001 2002 re_free_rxchain(sc); 2003 2004 /* Free the TX list buffers. */ 2005 for (i = 0; i < sc->re_tx_desc_cnt; i++) { 2006 if (sc->re_ldata.re_tx_mbuf[i] != NULL) { 2007 bus_dmamap_unload(sc->re_ldata.re_tx_mtag, 2008 sc->re_ldata.re_tx_dmamap[i]); 2009 m_freem(sc->re_ldata.re_tx_mbuf[i]); 2010 sc->re_ldata.re_tx_mbuf[i] = NULL; 2011 } 2012 } 2013 2014 /* Free the RX list buffers. */ 2015 for (i = 0; i < sc->re_rx_desc_cnt; i++) { 2016 if (sc->re_ldata.re_rx_mbuf[i] != NULL) { 2017 if ((sc->re_flags & RE_F_USE_JPOOL) == 0) { 2018 bus_dmamap_unload(sc->re_ldata.re_rx_mtag, 2019 sc->re_ldata.re_rx_dmamap[i]); 2020 } 2021 m_freem(sc->re_ldata.re_rx_mbuf[i]); 2022 sc->re_ldata.re_rx_mbuf[i] = NULL; 2023 } 2024 } 2025 } 2026 2027 /* 2028 * Device suspend routine. Stop the interface and save some PCI 2029 * settings in case the BIOS doesn't restore them properly on 2030 * resume. 
2031 */ 2032 static int 2033 re_suspend(device_t dev) 2034 { 2035 #ifndef BURN_BRIDGES 2036 int i; 2037 #endif 2038 struct re_softc *sc = device_get_softc(dev); 2039 struct ifnet *ifp = &sc->arpcom.ac_if; 2040 2041 lwkt_serialize_enter(ifp->if_serializer); 2042 2043 re_stop(sc, TRUE); 2044 2045 #ifndef BURN_BRIDGES 2046 for (i = 0; i < 5; i++) 2047 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 2048 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 2049 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 2050 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 2051 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 2052 #endif 2053 2054 sc->re_flags |= RE_F_SUSPENDED; 2055 2056 lwkt_serialize_exit(ifp->if_serializer); 2057 2058 return (0); 2059 } 2060 2061 /* 2062 * Device resume routine. Restore some PCI settings in case the BIOS 2063 * doesn't, re-enable busmastering, and restart the interface if 2064 * appropriate. 2065 */ 2066 static int 2067 re_resume(device_t dev) 2068 { 2069 struct re_softc *sc = device_get_softc(dev); 2070 struct ifnet *ifp = &sc->arpcom.ac_if; 2071 #ifndef BURN_BRIDGES 2072 int i; 2073 #endif 2074 2075 lwkt_serialize_enter(ifp->if_serializer); 2076 2077 #ifndef BURN_BRIDGES 2078 /* better way to do this? 
*/ 2079 for (i = 0; i < 5; i++) 2080 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 2081 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 2082 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 2083 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 2084 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 2085 2086 /* reenable busmastering */ 2087 pci_enable_busmaster(dev); 2088 pci_enable_io(dev, SYS_RES_IOPORT); 2089 #endif 2090 2091 /* reinitialize interface if necessary */ 2092 if (ifp->if_flags & IFF_UP) 2093 re_init(sc); 2094 2095 sc->re_flags &= ~RE_F_SUSPENDED; 2096 2097 lwkt_serialize_exit(ifp->if_serializer); 2098 2099 return (0); 2100 } 2101 2102 /* 2103 * Stop all chip I/O so that the kernel's probe routines don't 2104 * get confused by errant DMAs when rebooting. 2105 */ 2106 static void 2107 re_shutdown(device_t dev) 2108 { 2109 struct re_softc *sc = device_get_softc(dev); 2110 struct ifnet *ifp = &sc->arpcom.ac_if; 2111 2112 lwkt_serialize_enter(ifp->if_serializer); 2113 re_stop(sc, TRUE); 2114 rtl_hw_d3_para(sc); 2115 rtl_phy_power_down(sc); 2116 lwkt_serialize_exit(ifp->if_serializer); 2117 } 2118 2119 static int 2120 re_sysctl_rxtime(SYSCTL_HANDLER_ARGS) 2121 { 2122 struct re_softc *sc = arg1; 2123 2124 return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_rx_time); 2125 } 2126 2127 static int 2128 re_sysctl_txtime(SYSCTL_HANDLER_ARGS) 2129 { 2130 struct re_softc *sc = arg1; 2131 2132 return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_tx_time); 2133 } 2134 2135 static int 2136 re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *hwtime) 2137 { 2138 struct re_softc *sc = arg1; 2139 struct ifnet *ifp = &sc->arpcom.ac_if; 2140 int error, v; 2141 2142 lwkt_serialize_enter(ifp->if_serializer); 2143 2144 v = *hwtime; 2145 error = sysctl_handle_int(oidp, &v, 0, req); 2146 if (error || req->newptr == NULL) 2147 goto back; 2148 2149 if (v <= 0) { 2150 error = EINVAL; 2151 goto back; 2152 } 2153 2154 if (v != 
*hwtime) { 2155 *hwtime = v; 2156 2157 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) == 2158 IFF_RUNNING && sc->re_imtype == RE_IMTYPE_HW) 2159 re_setup_hw_im(sc); 2160 } 2161 back: 2162 lwkt_serialize_exit(ifp->if_serializer); 2163 return error; 2164 } 2165 2166 static int 2167 re_sysctl_simtime(SYSCTL_HANDLER_ARGS) 2168 { 2169 struct re_softc *sc = arg1; 2170 struct ifnet *ifp = &sc->arpcom.ac_if; 2171 int error, v; 2172 2173 lwkt_serialize_enter(ifp->if_serializer); 2174 2175 v = sc->re_sim_time; 2176 error = sysctl_handle_int(oidp, &v, 0, req); 2177 if (error || req->newptr == NULL) 2178 goto back; 2179 2180 if (v <= 0) { 2181 error = EINVAL; 2182 goto back; 2183 } 2184 2185 if (v != sc->re_sim_time) { 2186 sc->re_sim_time = v; 2187 2188 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) == 2189 IFF_RUNNING && sc->re_imtype == RE_IMTYPE_SIM) { 2190 #ifdef foo 2191 /* 2192 * Following code causes various strange 2193 * performance problems. Hmm ... 2194 */ 2195 sc->re_write_imr(sc, 0); 2196 CSR_WRITE_4(sc, RE_TIMERINT, 0); 2197 CSR_READ_4(sc, RE_TIMERINT); /* flush */ 2198 2199 sc->re_write_imr(sc, sc->re_intrs); 2200 re_setup_sim_im(sc); 2201 #else 2202 re_setup_intr(sc, 0, RE_IMTYPE_NONE); 2203 DELAY(10); 2204 re_setup_intr(sc, 1, RE_IMTYPE_SIM); 2205 #endif 2206 } 2207 } 2208 back: 2209 lwkt_serialize_exit(ifp->if_serializer); 2210 return error; 2211 } 2212 2213 static int 2214 re_sysctl_imtype(SYSCTL_HANDLER_ARGS) 2215 { 2216 struct re_softc *sc = arg1; 2217 struct ifnet *ifp = &sc->arpcom.ac_if; 2218 int error, v; 2219 2220 lwkt_serialize_enter(ifp->if_serializer); 2221 2222 v = sc->re_imtype; 2223 error = sysctl_handle_int(oidp, &v, 0, req); 2224 if (error || req->newptr == NULL) 2225 goto back; 2226 2227 if (v != RE_IMTYPE_HW && v != RE_IMTYPE_SIM && v != RE_IMTYPE_NONE) { 2228 error = EINVAL; 2229 goto back; 2230 } 2231 if (v == RE_IMTYPE_HW && (sc->re_caps & RE_C_HWIM) == 0) { 2232 /* Can't do hardware interrupt moderation */ 2233 error = 
EOPNOTSUPP; 2234 goto back; 2235 } 2236 2237 if (v != sc->re_imtype) { 2238 sc->re_imtype = v; 2239 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) == 2240 IFF_RUNNING) 2241 re_setup_intr(sc, 1, sc->re_imtype); 2242 } 2243 back: 2244 lwkt_serialize_exit(ifp->if_serializer); 2245 return error; 2246 } 2247 2248 static void 2249 re_setup_hw_im(struct re_softc *sc) 2250 { 2251 KKASSERT(sc->re_caps & RE_C_HWIM); 2252 2253 /* 2254 * Interrupt moderation 2255 * 2256 * 0xABCD 2257 * A - unknown (maybe TX related) 2258 * B - TX timer (unit: 25us) 2259 * C - unknown (maybe RX related) 2260 * D - RX timer (unit: 25us) 2261 * 2262 * 2263 * re(4)'s interrupt moderation is actually controlled by 2264 * two variables, like most other NICs (bge, bce etc.) 2265 * o timer 2266 * o number of packets [P] 2267 * 2268 * The logic relationship between these two variables is 2269 * similar to other NICs too: 2270 * if (timer expire || packets > [P]) 2271 * Interrupt is delivered 2272 * 2273 * Currently we only know how to set 'timer', but not 2274 * 'number of packets', which should be ~30, as far as I 2275 * tested (sink ~900Kpps, interrupt rate is 30KHz) 2276 */ 2277 CSR_WRITE_2(sc, RE_IM, 2278 RE_IM_RXTIME(sc->re_rx_time) | 2279 RE_IM_TXTIME(sc->re_tx_time) | 2280 RE_IM_MAGIC); 2281 } 2282 2283 static void 2284 re_disable_hw_im(struct re_softc *sc) 2285 { 2286 if (sc->re_caps & RE_C_HWIM) 2287 CSR_WRITE_2(sc, RE_IM, 0); 2288 } 2289 2290 static void 2291 re_setup_sim_im(struct re_softc *sc) 2292 { 2293 uint32_t ticks; 2294 2295 if (sc->re_if_flags & RL_FLAG_PCIE) { 2296 ticks = sc->re_sim_time * sc->re_bus_speed; 2297 } else { 2298 /* 2299 * Datasheet says tick decreases at bus speed, 2300 * but it seems the clock runs a little bit 2301 * faster, so we do some compensation here. 
 */
		ticks = (sc->re_sim_time * sc->re_bus_speed * 8) / 5;
	}
	CSR_WRITE_4(sc, RE_TIMERINT, ticks);

	CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */
	sc->re_flags |= RE_F_TIMER_INTR;
}

/*
 * Disable simulated (timer-driven) interrupt moderation: writing 0 to
 * TIMERINT stops the timer interrupt, and the TIMER_INTR flag is cleared
 * so the rest of the driver knows the timer interrupt is off.
 */
static void
re_disable_sim_im(struct re_softc *sc)
{
	CSR_WRITE_4(sc, RE_TIMERINT, 0);
	sc->re_flags &= ~RE_F_TIMER_INTR;
}

/*
 * Select the interrupt mask and the RX/TX acknowledge bits for the
 * requested interrupt moderation type:
 *
 *   RE_IMTYPE_HW / RE_IMTYPE_NONE - normal interrupt sources; RX events
 *	are acked via RX_OK/FIFO_OFLOW/RX_OVERRUN and TX via TX_OK.
 *   RE_IMTYPE_SIM - all RX/TX completions are signalled through the
 *	PCS timeout (timer) interrupt, so both ack masks use it.
 *
 * Hardware moderation is only legal when the chip advertises RE_C_HWIM.
 */
static void
re_config_imtype(struct re_softc *sc, int imtype)
{
	switch (imtype) {
	case RE_IMTYPE_HW:
		KKASSERT(sc->re_caps & RE_C_HWIM);
		/* FALL THROUGH */
	case RE_IMTYPE_NONE:
		sc->re_intrs = RE_INTRS;
		sc->re_rx_ack = RE_ISR_RX_OK | RE_ISR_FIFO_OFLOW |
		    RE_ISR_RX_OVERRUN;
		sc->re_tx_ack = RE_ISR_TX_OK;
		break;

	case RE_IMTYPE_SIM:
		sc->re_intrs = RE_INTRS_TIMER;
		sc->re_rx_ack = RE_ISR_PCS_TIMEOUT;
		sc->re_tx_ack = RE_ISR_PCS_TIMEOUT;
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->arpcom.ac_if.if_xname, imtype);
	}
}

/*
 * Configure interrupt moderation (imtype) and program the interrupt
 * mask register accordingly.  The IMR is written through the chip-
 * specific re_write_imr hook; a zero mask is written when interrupts
 * are to stay disabled.  The polling stcount is reset so any deferred
 * polling state starts fresh.  Exactly one moderation scheme (hardware
 * or simulated) is left active; the other is explicitly disabled.
 */
static void
re_setup_intr(struct re_softc *sc, int enable_intrs, int imtype)
{
	re_config_imtype(sc, imtype);

	if (enable_intrs)
		sc->re_write_imr(sc, sc->re_intrs);
	else
		sc->re_write_imr(sc, 0);

	sc->re_npoll.ifpc_stcount = 0;

	switch (imtype) {
	case RE_IMTYPE_NONE:
		re_disable_sim_im(sc);
		re_disable_hw_im(sc);
		break;

	case RE_IMTYPE_HW:
		KKASSERT(sc->re_caps & RE_C_HWIM);
		re_disable_sim_im(sc);
		re_setup_hw_im(sc);
		break;

	case RE_IMTYPE_SIM:
		re_disable_hw_im(sc);
		re_setup_sim_im(sc);
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->arpcom.ac_if.if_xname, imtype);
	}
}

/*
 * Allocate the jumbo receive buffer pool: one coherent DMA area of
 * RE_JBUF_COUNT(sc) * RE_JBUF_SIZE bytes, plus a kmalloc'ed array of
 * re_jbuf bookkeeping structures.  Each jbuf records its virtual
 * address, bus address and slot index, and all of them start on the
 * free list with a zero refcount.  Returns 0 on success or the
 * bus_dmamem_coherent() error code on failure.
 */
static int
re_jpool_alloc(struct re_softc *sc)
{
	struct re_list_data *ldata = &sc->re_ldata;
	struct re_jbuf *jbuf;
	bus_addr_t paddr;
	bus_size_t jpool_size;
	bus_dmamem_t dmem;
	caddr_t buf;
	int i, error;

	lwkt_serialize_init(&ldata->re_jbuf_serializer);

	ldata->re_jbuf = kmalloc(sizeof(struct re_jbuf) * RE_JBUF_COUNT(sc),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	jpool_size = RE_JBUF_COUNT(sc) * RE_JBUF_SIZE;

	error = bus_dmamem_coherent(sc->re_parent_tag,
	    RE_RXBUF_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    jpool_size, BUS_DMA_WAITOK, &dmem);
	if (error) {
		device_printf(sc->dev, "could not allocate jumbo memory\n");
		return error;
	}
	ldata->re_jpool_tag = dmem.dmem_tag;
	ldata->re_jpool_map = dmem.dmem_map;
	ldata->re_jpool = dmem.dmem_addr;
	paddr = dmem.dmem_busaddr;

	/* ... and split the pool into RE_JBUF_SIZE (9KB) chunks */
	SLIST_INIT(&ldata->re_jbuf_free);

	buf = ldata->re_jpool;
	for (i = 0; i < RE_JBUF_COUNT(sc); i++) {
		jbuf = &ldata->re_jbuf[i];

		jbuf->re_sc = sc;
		jbuf->re_inuse = 0;
		jbuf->re_slot = i;
		jbuf->re_buf = buf;
		jbuf->re_paddr = paddr;

		SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link);

		buf += RE_JBUF_SIZE;
		paddr += RE_JBUF_SIZE;
	}
	return 0;
}

/*
 * Tear down the jumbo buffer pool created by re_jpool_alloc().  Both
 * halves are released independently and guarded by NULL checks, so this
 * is safe to call on a partially constructed pool (e.g. from an attach
 * failure path).
 */
static void
re_jpool_free(struct re_softc *sc)
{
	struct re_list_data *ldata = &sc->re_ldata;

	if (ldata->re_jpool_tag != NULL) {
		bus_dmamap_unload(ldata->re_jpool_tag, ldata->re_jpool_map);
		bus_dmamem_free(ldata->re_jpool_tag, ldata->re_jpool,
		    ldata->re_jpool_map);
		bus_dma_tag_destroy(ldata->re_jpool_tag);
		ldata->re_jpool_tag = NULL;
	}

	if (ldata->re_jbuf != NULL) {
		kfree(ldata->re_jbuf, M_DEVBUF);
		ldata->re_jbuf = NULL;
	}
}

#ifdef RE_JUMBO
/*
 * Pop a jumbo buffer off the free list, marking it in use (refcount 1).
 * Returns NULL when the pool is exhausted.  The free list is protected
 * by re_jbuf_serializer.
 */
static struct re_jbuf *
re_jbuf_alloc(struct re_softc *sc)
{
	struct re_list_data *ldata = &sc->re_ldata;
	struct re_jbuf *jbuf;

	lwkt_serialize_enter(&ldata->re_jbuf_serializer);

	jbuf = SLIST_FIRST(&ldata->re_jbuf_free);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&ldata->re_jbuf_free, re_link);
		jbuf->re_inuse = 1;
	}

	lwkt_serialize_exit(&ldata->re_jbuf_serializer);

	return jbuf;
}

/*
 * Release one reference on a jumbo buffer (external mbuf free callback).
 * Sanity-panics if the pointer does not match its slot in the jbuf array
 * or if the buffer is not marked in use.  The decrement and the
 * refcount-reached-zero check both happen under the serializer, so the
 * buffer is returned to the free list exactly once.
 */
static void
re_jbuf_free(void *arg)
{
	struct re_jbuf *jbuf = arg;
	struct re_softc *sc = jbuf->re_sc;
	struct re_list_data *ldata = &sc->re_ldata;

	if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) {
		panic("%s: free wrong jumbo buffer",
		    sc->arpcom.ac_if.if_xname);
	} else if (jbuf->re_inuse == 0) {
		panic("%s: jumbo buffer already freed",
		    sc->arpcom.ac_if.if_xname);
	}

	lwkt_serialize_enter(&ldata->re_jbuf_serializer);
	atomic_subtract_int(&jbuf->re_inuse, 1);
	if (jbuf->re_inuse == 0)
		SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link);
	lwkt_serialize_exit(&ldata->re_jbuf_serializer);
}

/*
 * Add a reference to an in-use jumbo buffer (external mbuf ref callback).
 * Same slot/in-use sanity panics as re_jbuf_free(); the increment itself
 * is atomic and needs no serializer.
 */
static void
re_jbuf_ref(void *arg)
{
	struct re_jbuf *jbuf = arg;
	struct re_softc *sc = jbuf->re_sc;
	struct re_list_data *ldata = &sc->re_ldata;

	if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) {
		panic("%s: ref wrong jumbo buffer",
		    sc->arpcom.ac_if.if_xname);
	} else if (jbuf->re_inuse == 0) {
		panic("%s: jumbo buffer already freed",
		    sc->arpcom.ac_if.if_xname);
	}
	atomic_add_int(&jbuf->re_inuse, 1);
}
#endif	/* RE_JUMBO */

/*
 * Disable PCIe Active State Power Management (ASPM) on the device by
 * clearing the L0s and L1 enable bits in the Link Control register.
 * No-op if the device has no PCIe capability or advertises no ASPM
 * support in the Link Capabilities register.
 */
static void
re_disable_aspm(device_t dev)
{
	uint16_t link_cap, link_ctrl;
	uint8_t pcie_ptr, reg;

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		device_printf(dev, "disable ASPM\n");

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~(PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1);
	pci_write_config(dev, reg, link_ctrl, 2);
}

/*
 * Chip accessor hooks.  The pre-8125 chips use a 1-byte TX poll register
 * and 2-byte IMR/ISR; the 8125 variants below use different register
 * offsets and 4-byte IMR/ISR accesses.  The softc's re_write_imr/
 * re_write_isr/re_read_isr function pointers are set to the matching
 * family at attach time.
 */

/* Kick the chip to start transmitting from the normal-priority queue. */
static void
re_start_xmit(struct re_softc *sc)
{
	CSR_WRITE_1(sc, RE_TPPOLL, RE_NPQ);
}

/* Write the interrupt mask register (pre-8125: 16-bit IMR). */
static void
re_write_imr(struct re_softc *sc, uint32_t val)
{
	CSR_WRITE_2(sc, RE_IMR, val);
}

/* Acknowledge interrupt status bits (pre-8125: 16-bit ISR). */
static void
re_write_isr(struct re_softc *sc, uint32_t val)
{
	CSR_WRITE_2(sc, RE_ISR, val);
}

/* Read the interrupt status register (pre-8125: 16-bit ISR). */
static uint32_t
re_read_isr(struct re_softc *sc)
{
	return CSR_READ_2(sc, RE_ISR);
}

/* 8125: kick the transmitter via the 8125-specific TX poll register. */
static void
re_start_xmit_8125(struct re_softc *sc)
{
	CSR_WRITE_2(sc, RE_TPPOLL_8125, RE_NPQ_8125);
}

/* 8125: write the 32-bit interrupt mask register. */
static void
re_write_imr_8125(struct re_softc *sc, uint32_t val)
{
	CSR_WRITE_4(sc, RE_IMR0_8125, val);
}

/* 8125: acknowledge bits in the 32-bit interrupt status register. */
static void
re_write_isr_8125(struct re_softc *sc, uint32_t val)
{
	CSR_WRITE_4(sc, RE_ISR0_8125, val);
}

/* 8125: read the 32-bit interrupt status register. */
static uint32_t
re_read_isr_8125(struct re_softc *sc)
{
	return CSR_READ_4(sc, RE_ISR0_8125);
}