1 /* 2 * Copyright (c) 2004 3 * Joerg Sonnenberger <joerg@bec.de>. All rights reserved. 4 * 5 * Copyright (c) 1997, 1998-2003 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * $FreeBSD: src/sys/dev/re/if_re.c,v 1.25 2004/06/09 14:34:01 naddy Exp $
 */

/*
 * RealTek 8169S/8110S/8168/8111/8101E/8125 PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8169, the RTL8169S, RTL8110S,
 * the RTL8168, the RTL8111, the RTL8101E and the RTL8125.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC:
 *
 * o Descriptor based DMA mechanism. Each descriptor represents
 *   a single packet fragment. Data buffers may be aligned on
 *   any byte boundary.
 *
 * o 64-bit DMA.
 *
 * o TCP/IP checksum offload for both RX and TX.
 *
 * o High and normal priority transmit DMA rings.
 *
 * o VLAN tag insertion and extraction.
 *
 * o TCP large send (segmentation offload).
 *
 * o 1000Mbps mode.
 *
 * o Jumbo frames.
 *
 * o GMII and TBI ports/registers for interfacing with copper
 *   or fiber PHYs.
 *
 * o RX and TX DMA rings can have up to 1024 descriptors.
 *
 * The 8169 does not have a built-in PHY. Most reference boards use a
 * Marvell 88E1000 'Alaska' copper gigE PHY. 8169/8110 is _no longer_
 * supported.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 * 8125 supports 10/100/1000/2500.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features.
It also implements 90 * interrupt moderation using the timer interrupt registers, which 91 * significantly reduces interrupt load. 92 */ 93 94 #define _IP_VHL 95 96 #include "opt_ifpoll.h" 97 98 #include <sys/param.h> 99 #include <sys/bus.h> 100 #include <sys/endian.h> 101 #include <sys/kernel.h> 102 #include <sys/in_cksum.h> 103 #include <sys/interrupt.h> 104 #include <sys/malloc.h> 105 #include <sys/mbuf.h> 106 #include <sys/rman.h> 107 #include <sys/serialize.h> 108 #include <sys/socket.h> 109 #include <sys/sockio.h> 110 #include <sys/sysctl.h> 111 112 #include <net/bpf.h> 113 #include <net/ethernet.h> 114 #include <net/if.h> 115 #include <net/ifq_var.h> 116 #include <net/if_arp.h> 117 #include <net/if_dl.h> 118 #include <net/if_media.h> 119 #include <net/if_poll.h> 120 #include <net/if_types.h> 121 #include <net/vlan/if_vlan_var.h> 122 #include <net/vlan/if_vlan_ether.h> 123 124 #include <netinet/ip.h> 125 126 #include "pcidevs.h" 127 #include <bus/pci/pcireg.h> 128 #include <bus/pci/pcivar.h> 129 130 #include <dev/netif/re/if_rereg.h> 131 #include <dev/netif/re/if_revar.h> 132 #include <dev/netif/re/re.h> 133 #include <dev/netif/re/re_dragonfly.h> 134 135 /* 136 * Various supported device vendors/types and their names. 137 */ 138 static const struct re_type { 139 uint16_t re_vid; 140 uint16_t re_did; 141 const char *re_name; 142 } re_devs[] = { 143 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE528T, 144 "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, 145 146 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8101E, 147 "RealTek 810x PCIe 10/100baseTX" }, 148 149 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168, 150 "RealTek 8111/8168 PCIe Gigabit Ethernet" }, 151 152 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168_1, 153 "RealTek 8168 PCIe Gigabit Ethernet" }, 154 155 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125, 156 "RealTek 8125 PCIe Gigabit Ethernet" }, 157 158 #ifdef notyet 159 /* 160 * This driver now only supports built-in PHYs. 
161 */ 162 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, 163 "RealTek 8110/8169 Gigabit Ethernet" }, 164 #endif 165 166 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169SC, 167 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, 168 169 { PCI_VENDOR_COREGA, PCI_PRODUCT_COREGA_CG_LAPCIGT, 170 "Corega CG-LAPCIGT Gigabit Ethernet" }, 171 172 { PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032, 173 "Linksys EG1032 Gigabit Ethernet" }, 174 175 { PCI_VENDOR_USR2, PCI_PRODUCT_USR2_997902, 176 "US Robotics 997902 Gigabit Ethernet" }, 177 178 { PCI_VENDOR_TTTECH, PCI_PRODUCT_TTTECH_MC322, 179 "TTTech MC322 Gigabit Ethernet" }, 180 181 { 0, 0, NULL } 182 }; 183 184 static int re_probe(device_t); 185 static int re_attach(device_t); 186 static int re_detach(device_t); 187 static int re_suspend(device_t); 188 static int re_resume(device_t); 189 static void re_shutdown(device_t); 190 191 static int re_allocmem(device_t); 192 static void re_freemem(device_t); 193 static void re_freebufmem(struct re_softc *, int, int); 194 static int re_encap(struct re_softc *, struct mbuf **, int *); 195 static int re_newbuf_std(struct re_softc *, int, int); 196 #ifdef RE_JUMBO 197 static int re_newbuf_jumbo(struct re_softc *, int, int); 198 #endif 199 static void re_setup_rxdesc(struct re_softc *, int); 200 static int re_rx_list_init(struct re_softc *); 201 static int re_tx_list_init(struct re_softc *); 202 static int re_rxeof(struct re_softc *); 203 static int re_txeof(struct re_softc *); 204 static int re_tx_collect(struct re_softc *); 205 static void re_intr(void *); 206 static void re_tick(void *); 207 static void re_tick_serialized(void *); 208 static void re_disable_aspm(device_t); 209 static void re_link_up(struct re_softc *); 210 static void re_link_down(struct re_softc *); 211 212 static void re_start_xmit(struct re_softc *); 213 static void re_write_imr(struct re_softc *, uint32_t); 214 static void re_write_isr(struct re_softc *, uint32_t); 215 static uint32_t re_read_isr(struct 
re_softc *); 216 static void re_start_xmit_8125(struct re_softc *); 217 static void re_write_imr_8125(struct re_softc *, uint32_t); 218 static void re_write_isr_8125(struct re_softc *, uint32_t); 219 static uint32_t re_read_isr_8125(struct re_softc *); 220 221 static void re_start(struct ifnet *, struct ifaltq_subque *); 222 static int re_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 223 static void re_init(void *); 224 static void re_stop(struct re_softc *, boolean_t); 225 static void re_watchdog(struct ifnet *); 226 227 static void re_setup_hw_im(struct re_softc *); 228 static void re_setup_sim_im(struct re_softc *); 229 static void re_disable_hw_im(struct re_softc *); 230 static void re_disable_sim_im(struct re_softc *); 231 static void re_config_imtype(struct re_softc *, int); 232 static void re_setup_intr(struct re_softc *, int, int); 233 234 static int re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *); 235 static int re_sysctl_rxtime(SYSCTL_HANDLER_ARGS); 236 static int re_sysctl_txtime(SYSCTL_HANDLER_ARGS); 237 static int re_sysctl_simtime(SYSCTL_HANDLER_ARGS); 238 static int re_sysctl_imtype(SYSCTL_HANDLER_ARGS); 239 240 static int re_jpool_alloc(struct re_softc *); 241 static void re_jpool_free(struct re_softc *); 242 #ifdef RE_JUMBO 243 static struct re_jbuf *re_jbuf_alloc(struct re_softc *); 244 static void re_jbuf_free(void *); 245 static void re_jbuf_ref(void *); 246 #endif 247 248 #ifdef IFPOLL_ENABLE 249 static void re_npoll(struct ifnet *, struct ifpoll_info *); 250 static void re_npoll_compat(struct ifnet *, void *, int); 251 #endif 252 253 static device_method_t re_methods[] = { 254 /* Device interface */ 255 DEVMETHOD(device_probe, re_probe), 256 DEVMETHOD(device_attach, re_attach), 257 DEVMETHOD(device_detach, re_detach), 258 DEVMETHOD(device_suspend, re_suspend), 259 DEVMETHOD(device_resume, re_resume), 260 DEVMETHOD(device_shutdown, re_shutdown), 261 DEVMETHOD_END 262 }; 263 264 static driver_t re_driver = { 265 "re", 266 re_methods, 267 
sizeof(struct re_softc) 268 }; 269 270 static devclass_t re_devclass; 271 272 DECLARE_DUMMY_MODULE(if_re); 273 DRIVER_MODULE(if_re, pci, re_driver, re_devclass, NULL, NULL); 274 DRIVER_MODULE(if_re, cardbus, re_driver, re_devclass, NULL, NULL); 275 276 static int re_rx_desc_count = RE_RX_DESC_CNT_DEF; 277 static int re_tx_desc_count = RE_TX_DESC_CNT_DEF; 278 static int re_msi_enable = 1; 279 280 TUNABLE_INT("hw.re.rx_desc_count", &re_rx_desc_count); 281 TUNABLE_INT("hw.re.tx_desc_count", &re_tx_desc_count); 282 TUNABLE_INT("hw.re.msi.enable", &re_msi_enable); 283 284 static __inline void 285 re_free_rxchain(struct re_softc *sc) 286 { 287 if (sc->re_head != NULL) { 288 m_freem(sc->re_head); 289 sc->re_head = sc->re_tail = NULL; 290 } 291 } 292 293 static int 294 re_probe(device_t dev) 295 { 296 const struct re_type *t; 297 uint16_t vendor, product; 298 299 vendor = pci_get_vendor(dev); 300 product = pci_get_device(dev); 301 302 /* 303 * Only attach to rev.3 of the Linksys EG1032 adapter. 304 * Rev.2 is supported by sk(4). 
305 */ 306 if (vendor == PCI_VENDOR_LINKSYS && 307 product == PCI_PRODUCT_LINKSYS_EG1032 && 308 pci_get_subdevice(dev) != PCI_SUBDEVICE_LINKSYS_EG1032_REV3) 309 return ENXIO; 310 311 for (t = re_devs; t->re_name != NULL; t++) { 312 if (product == t->re_did && vendor == t->re_vid) 313 break; 314 } 315 if (t->re_name == NULL) 316 return ENXIO; 317 318 device_set_desc(dev, t->re_name); 319 return 0; 320 } 321 322 static int 323 re_allocmem(device_t dev) 324 { 325 struct re_softc *sc = device_get_softc(dev); 326 bus_dmamem_t dmem; 327 int error, i; 328 329 /* 330 * Allocate list data 331 */ 332 sc->re_ldata.re_tx_mbuf = 333 kmalloc(sc->re_tx_desc_cnt * sizeof(struct mbuf *), 334 M_DEVBUF, M_ZERO | M_WAITOK); 335 336 sc->re_ldata.re_rx_mbuf = 337 kmalloc(sc->re_rx_desc_cnt * sizeof(struct mbuf *), 338 M_DEVBUF, M_ZERO | M_WAITOK); 339 340 sc->re_ldata.re_rx_paddr = 341 kmalloc(sc->re_rx_desc_cnt * sizeof(bus_addr_t), 342 M_DEVBUF, M_ZERO | M_WAITOK); 343 344 sc->re_ldata.re_tx_dmamap = 345 kmalloc(sc->re_tx_desc_cnt * sizeof(bus_dmamap_t), 346 M_DEVBUF, M_ZERO | M_WAITOK); 347 348 sc->re_ldata.re_rx_dmamap = 349 kmalloc(sc->re_rx_desc_cnt * sizeof(bus_dmamap_t), 350 M_DEVBUF, M_ZERO | M_WAITOK); 351 352 /* 353 * Allocate the parent bus DMA tag appropriate for PCI. 354 */ 355 error = bus_dma_tag_create(NULL, /* parent */ 356 1, 0, /* alignment, boundary */ 357 BUS_SPACE_MAXADDR, /* lowaddr */ 358 BUS_SPACE_MAXADDR, /* highaddr */ 359 NULL, NULL, /* filter, filterarg */ 360 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 361 0, /* nsegments */ 362 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 363 0, /* flags */ 364 &sc->re_parent_tag); 365 if (error) { 366 device_printf(dev, "could not allocate parent dma tag\n"); 367 return error; 368 } 369 370 /* Allocate TX descriptor list. 
*/ 371 error = bus_dmamem_coherent(sc->re_parent_tag, 372 RE_RING_ALIGN, 0, 373 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 374 RE_TX_LIST_SZ(sc), BUS_DMA_WAITOK | BUS_DMA_ZERO, 375 &dmem); 376 if (error) { 377 device_printf(dev, "could not allocate TX ring\n"); 378 return error; 379 } 380 sc->re_ldata.re_tx_list_tag = dmem.dmem_tag; 381 sc->re_ldata.re_tx_list_map = dmem.dmem_map; 382 sc->re_ldata.re_tx_list = dmem.dmem_addr; 383 sc->re_ldata.re_tx_list_addr = dmem.dmem_busaddr; 384 385 /* Allocate RX descriptor list. */ 386 error = bus_dmamem_coherent(sc->re_parent_tag, 387 RE_RING_ALIGN, 0, 388 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 389 RE_RX_LIST_SZ(sc), BUS_DMA_WAITOK | BUS_DMA_ZERO, 390 &dmem); 391 if (error) { 392 device_printf(dev, "could not allocate RX ring\n"); 393 return error; 394 } 395 sc->re_ldata.re_rx_list_tag = dmem.dmem_tag; 396 sc->re_ldata.re_rx_list_map = dmem.dmem_map; 397 sc->re_ldata.re_rx_list = dmem.dmem_addr; 398 sc->re_ldata.re_rx_list_addr = dmem.dmem_busaddr; 399 400 /* Allocate maps for TX mbufs. */ 401 error = bus_dma_tag_create(sc->re_parent_tag, 402 1, 0, 403 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 404 NULL, NULL, 405 RE_FRAMELEN_MAX, RE_MAXSEGS, MCLBYTES, 406 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 407 &sc->re_ldata.re_tx_mtag); 408 if (error) { 409 device_printf(dev, "could not allocate TX buf dma tag\n"); 410 return(error); 411 } 412 413 /* Create DMA maps for TX buffers */ 414 for (i = 0; i < sc->re_tx_desc_cnt; i++) { 415 error = bus_dmamap_create(sc->re_ldata.re_tx_mtag, 416 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 417 &sc->re_ldata.re_tx_dmamap[i]); 418 if (error) { 419 device_printf(dev, "can't create DMA map for TX buf\n"); 420 re_freebufmem(sc, i, 0); 421 return(error); 422 } 423 } 424 425 /* Allocate maps for RX mbufs. 
*/ 426 error = bus_dma_tag_create(sc->re_parent_tag, 427 RE_RXBUF_ALIGN, 0, 428 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 429 NULL, NULL, 430 MCLBYTES, 1, MCLBYTES, 431 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED, 432 &sc->re_ldata.re_rx_mtag); 433 if (error) { 434 device_printf(dev, "could not allocate RX buf dma tag\n"); 435 return(error); 436 } 437 438 /* Create spare DMA map for RX */ 439 error = bus_dmamap_create(sc->re_ldata.re_rx_mtag, BUS_DMA_WAITOK, 440 &sc->re_ldata.re_rx_spare); 441 if (error) { 442 device_printf(dev, "can't create spare DMA map for RX\n"); 443 bus_dma_tag_destroy(sc->re_ldata.re_rx_mtag); 444 sc->re_ldata.re_rx_mtag = NULL; 445 return error; 446 } 447 448 /* Create DMA maps for RX buffers */ 449 for (i = 0; i < sc->re_rx_desc_cnt; i++) { 450 error = bus_dmamap_create(sc->re_ldata.re_rx_mtag, 451 BUS_DMA_WAITOK, &sc->re_ldata.re_rx_dmamap[i]); 452 if (error) { 453 device_printf(dev, "can't create DMA map for RX buf\n"); 454 re_freebufmem(sc, sc->re_tx_desc_cnt, i); 455 return(error); 456 } 457 } 458 459 /* Create jumbo buffer pool for RX if required */ 460 if (sc->re_caps & RE_C_CONTIGRX) { 461 error = re_jpool_alloc(sc); 462 if (error) { 463 re_jpool_free(sc); 464 #ifdef RE_JUMBO 465 /* Disable jumbo frame support */ 466 sc->re_maxmtu = ETHERMTU; 467 #endif 468 } 469 } 470 return(0); 471 } 472 473 static void 474 re_freebufmem(struct re_softc *sc, int tx_cnt, int rx_cnt) 475 { 476 int i; 477 478 /* Destroy all the RX and TX buffer maps */ 479 if (sc->re_ldata.re_tx_mtag) { 480 for (i = 0; i < tx_cnt; i++) { 481 bus_dmamap_destroy(sc->re_ldata.re_tx_mtag, 482 sc->re_ldata.re_tx_dmamap[i]); 483 } 484 bus_dma_tag_destroy(sc->re_ldata.re_tx_mtag); 485 sc->re_ldata.re_tx_mtag = NULL; 486 } 487 488 if (sc->re_ldata.re_rx_mtag) { 489 for (i = 0; i < rx_cnt; i++) { 490 bus_dmamap_destroy(sc->re_ldata.re_rx_mtag, 491 sc->re_ldata.re_rx_dmamap[i]); 492 } 493 bus_dmamap_destroy(sc->re_ldata.re_rx_mtag, 494 sc->re_ldata.re_rx_spare); 495 
bus_dma_tag_destroy(sc->re_ldata.re_rx_mtag); 496 sc->re_ldata.re_rx_mtag = NULL; 497 } 498 } 499 500 static void 501 re_freemem(device_t dev) 502 { 503 struct re_softc *sc = device_get_softc(dev); 504 505 /* Unload and free the RX DMA ring memory and map */ 506 if (sc->re_ldata.re_rx_list_tag) { 507 bus_dmamap_unload(sc->re_ldata.re_rx_list_tag, 508 sc->re_ldata.re_rx_list_map); 509 bus_dmamem_free(sc->re_ldata.re_rx_list_tag, 510 sc->re_ldata.re_rx_list, 511 sc->re_ldata.re_rx_list_map); 512 bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag); 513 } 514 515 /* Unload and free the TX DMA ring memory and map */ 516 if (sc->re_ldata.re_tx_list_tag) { 517 bus_dmamap_unload(sc->re_ldata.re_tx_list_tag, 518 sc->re_ldata.re_tx_list_map); 519 bus_dmamem_free(sc->re_ldata.re_tx_list_tag, 520 sc->re_ldata.re_tx_list, 521 sc->re_ldata.re_tx_list_map); 522 bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag); 523 } 524 525 /* Free RX/TX buf DMA stuffs */ 526 re_freebufmem(sc, sc->re_tx_desc_cnt, sc->re_rx_desc_cnt); 527 528 /* Unload and free the stats buffer and map */ 529 if (sc->re_ldata.re_stag) { 530 bus_dmamap_unload(sc->re_ldata.re_stag, sc->re_ldata.re_smap); 531 bus_dmamem_free(sc->re_ldata.re_stag, 532 sc->re_ldata.re_stats, 533 sc->re_ldata.re_smap); 534 bus_dma_tag_destroy(sc->re_ldata.re_stag); 535 } 536 537 if (sc->re_caps & RE_C_CONTIGRX) 538 re_jpool_free(sc); 539 540 if (sc->re_parent_tag) 541 bus_dma_tag_destroy(sc->re_parent_tag); 542 543 if (sc->re_ldata.re_tx_mbuf != NULL) 544 kfree(sc->re_ldata.re_tx_mbuf, M_DEVBUF); 545 if (sc->re_ldata.re_rx_mbuf != NULL) 546 kfree(sc->re_ldata.re_rx_mbuf, M_DEVBUF); 547 if (sc->re_ldata.re_rx_paddr != NULL) 548 kfree(sc->re_ldata.re_rx_paddr, M_DEVBUF); 549 if (sc->re_ldata.re_tx_dmamap != NULL) 550 kfree(sc->re_ldata.re_tx_dmamap, M_DEVBUF); 551 if (sc->re_ldata.re_rx_dmamap != NULL) 552 kfree(sc->re_ldata.re_rx_dmamap, M_DEVBUF); 553 } 554 555 static boolean_t 556 re_is_faste(struct re_softc *sc) 557 { 558 if 
(pci_get_vendor(sc->dev) == PCI_VENDOR_REALTEK) { 559 switch (sc->re_device_id) { 560 case PCI_PRODUCT_REALTEK_RT8169: 561 case PCI_PRODUCT_REALTEK_RT8169SC: 562 case PCI_PRODUCT_REALTEK_RT8168: 563 case PCI_PRODUCT_REALTEK_RT8168_1: 564 case PCI_PRODUCT_REALTEK_RT8125: 565 return FALSE; 566 default: 567 return TRUE; 568 } 569 } else { 570 return FALSE; 571 } 572 } 573 574 static bool 575 re_is_2500e(const struct re_softc *sc) 576 { 577 if (pci_get_vendor(sc->dev) == PCI_VENDOR_REALTEK) { 578 switch (sc->re_device_id) { 579 case PCI_PRODUCT_REALTEK_RT8125: 580 return true; 581 582 default: 583 return false; 584 } 585 } 586 return false; 587 } 588 589 /* 590 * Attach the interface. Allocate softc structures, do ifmedia 591 * setup and ethernet/BPF attach. 592 */ 593 static int 594 re_attach(device_t dev) 595 { 596 struct re_softc *sc = device_get_softc(dev); 597 struct ifnet *ifp; 598 struct sysctl_ctx_list *ctx; 599 struct sysctl_oid *tree; 600 uint8_t eaddr[ETHER_ADDR_LEN]; 601 int error = 0, qlen, msi_enable; 602 u_int irq_flags; 603 604 callout_init_mp(&sc->re_timer); 605 sc->dev = dev; 606 sc->re_device_id = pci_get_device(dev); 607 sc->re_unit = device_get_unit(dev); 608 ifmedia_init(&sc->media, IFM_IMASK, rtl_ifmedia_upd, rtl_ifmedia_sts); 609 610 if (pci_get_vendor(dev) == PCI_VENDOR_REALTEK && 611 sc->re_device_id == PCI_PRODUCT_REALTEK_RT8125) { 612 sc->re_start_xmit = re_start_xmit_8125; 613 sc->re_write_imr = re_write_imr_8125; 614 sc->re_write_isr = re_write_isr_8125; 615 sc->re_read_isr = re_read_isr_8125; 616 } else { 617 sc->re_start_xmit = re_start_xmit; 618 sc->re_write_imr = re_write_imr; 619 sc->re_write_isr = re_write_isr; 620 sc->re_read_isr = re_read_isr; 621 } 622 623 sc->re_caps = RE_C_HWIM; 624 625 sc->re_rx_desc_cnt = re_rx_desc_count; 626 if (sc->re_rx_desc_cnt > RE_RX_DESC_CNT_MAX) 627 sc->re_rx_desc_cnt = RE_RX_DESC_CNT_MAX; 628 629 sc->re_tx_desc_cnt = re_tx_desc_count; 630 if (sc->re_tx_desc_cnt > RE_TX_DESC_CNT_MAX) 631 
sc->re_tx_desc_cnt = RE_TX_DESC_CNT_MAX; 632 633 qlen = RE_IFQ_MAXLEN; 634 if (sc->re_tx_desc_cnt > qlen) 635 qlen = sc->re_tx_desc_cnt; 636 637 sc->re_rxbuf_size = MCLBYTES; 638 sc->re_newbuf = re_newbuf_std; 639 640 /* 641 * Hardware interrupt moderation settings. 642 * XXX does not seem correct, undocumented. 643 */ 644 sc->re_tx_time = 5; /* 125us */ 645 sc->re_rx_time = 2; /* 50us */ 646 647 /* Simulated interrupt moderation setting. */ 648 sc->re_sim_time = 150; /* 150us */ 649 650 /* Use simulated interrupt moderation by default. */ 651 sc->re_imtype = RE_IMTYPE_SIM; 652 re_config_imtype(sc, sc->re_imtype); 653 654 ctx = device_get_sysctl_ctx(dev); 655 tree = device_get_sysctl_tree(dev); 656 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 657 "rx_desc_count", CTLFLAG_RD, &sc->re_rx_desc_cnt, 658 0, "RX desc count"); 659 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 660 "tx_desc_count", CTLFLAG_RD, &sc->re_tx_desc_cnt, 661 0, "TX desc count"); 662 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "sim_time", 663 CTLTYPE_INT | CTLFLAG_RW, 664 sc, 0, re_sysctl_simtime, "I", 665 "Simulated interrupt moderation time (usec)."); 666 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "imtype", 667 CTLTYPE_INT | CTLFLAG_RW, 668 sc, 0, re_sysctl_imtype, "I", 669 "Interrupt moderation type -- " 670 "0:disable, 1:simulated, " 671 "2:hardware(if supported)"); 672 if (sc->re_caps & RE_C_HWIM) { 673 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 674 OID_AUTO, "hw_rxtime", 675 CTLTYPE_INT | CTLFLAG_RW, 676 sc, 0, re_sysctl_rxtime, "I", 677 "Hardware interrupt moderation time " 678 "(unit: 25usec)."); 679 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 680 OID_AUTO, "hw_txtime", 681 CTLTYPE_INT | CTLFLAG_RW, 682 sc, 0, re_sysctl_txtime, "I", 683 "Hardware interrupt moderation time " 684 "(unit: 25usec)."); 685 } 686 687 #ifndef BURN_BRIDGES 688 /* 689 * Handle power management nonsense. 
690 */ 691 692 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 693 uint32_t membase, irq; 694 695 /* Save important PCI config data. */ 696 membase = pci_read_config(dev, RE_PCI_LOMEM, 4); 697 irq = pci_read_config(dev, PCIR_INTLINE, 4); 698 699 /* Reset the power state. */ 700 device_printf(dev, "chip is in D%d power mode " 701 "-- setting to D0\n", pci_get_powerstate(dev)); 702 703 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 704 705 /* Restore PCI config data. */ 706 pci_write_config(dev, RE_PCI_LOMEM, membase, 4); 707 pci_write_config(dev, PCIR_INTLINE, irq, 4); 708 } 709 #endif 710 /* 711 * Map control/status registers. 712 */ 713 pci_enable_busmaster(dev); 714 715 if (pci_is_pcie(dev)) { 716 sc->re_res_rid = PCIR_BAR(2); 717 sc->re_res_type = SYS_RES_MEMORY; 718 } else { 719 sc->re_res_rid = PCIR_BAR(0); 720 sc->re_res_type = SYS_RES_IOPORT; 721 } 722 sc->re_res = bus_alloc_resource_any(dev, sc->re_res_type, 723 &sc->re_res_rid, RF_ACTIVE); 724 if (sc->re_res == NULL) { 725 device_printf(dev, "couldn't map IO\n"); 726 error = ENXIO; 727 goto fail; 728 } 729 730 sc->re_btag = rman_get_bustag(sc->re_res); 731 sc->re_bhandle = rman_get_bushandle(sc->re_res); 732 733 error = rtl_check_mac_version(sc); 734 if (error) { 735 device_printf(dev, "check mac version failed\n"); 736 goto fail; 737 } 738 739 rtl_init_software_variable(sc); 740 if (pci_is_pcie(dev)) 741 sc->re_if_flags |= RL_FLAG_PCIE; 742 else 743 sc->re_if_flags &= ~RL_FLAG_PCIE; 744 device_printf(dev, "MAC version 0x%08x, MACFG %u%s%s%s\n", 745 (CSR_READ_4(sc, RE_TXCFG) & 0xFCF00000), sc->re_type, 746 sc->re_coalesce_tx_pkt ? ", software TX defrag" : "", 747 sc->re_pad_runt ? ", pad runt" : "", 748 sc->re_hw_enable_msi_msix ? 
", support MSI" : ""); 749 750 /* 751 * Allocate interrupt 752 */ 753 if (pci_is_pcie(dev) && sc->re_hw_enable_msi_msix) 754 msi_enable = re_msi_enable; 755 else 756 msi_enable = 0; 757 sc->re_irq_type = pci_alloc_1intr(dev, msi_enable, 758 &sc->re_irq_rid, &irq_flags); 759 760 sc->re_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->re_irq_rid, 761 irq_flags); 762 if (sc->re_irq == NULL) { 763 device_printf(dev, "couldn't map interrupt\n"); 764 error = ENXIO; 765 goto fail; 766 } 767 768 /* Disable ASPM */ 769 re_disable_aspm(dev); 770 771 rtl_exit_oob(sc); 772 rtl_hw_init(sc); 773 774 /* Reset the adapter. */ 775 rtl_reset(sc); 776 777 rtl_get_hw_mac_address(sc, eaddr); 778 if (sc->re_type == MACFG_3) /* Change PCI Latency time*/ 779 pci_write_config(dev, PCIR_LATTIMER, 0x40, 1); 780 781 /* Allocate DMA stuffs */ 782 error = re_allocmem(dev); 783 if (error) 784 goto fail; 785 786 if (pci_is_pcie(dev)) { 787 sc->re_bus_speed = 125; 788 } else { 789 uint8_t cfg2; 790 791 cfg2 = CSR_READ_1(sc, RE_CFG2); 792 switch (cfg2 & RE_CFG2_PCICLK_MASK) { 793 case RE_CFG2_PCICLK_33MHZ: 794 sc->re_bus_speed = 33; 795 break; 796 case RE_CFG2_PCICLK_66MHZ: 797 sc->re_bus_speed = 66; 798 break; 799 default: 800 device_printf(dev, "unknown bus speed, assume 33MHz\n"); 801 sc->re_bus_speed = 33; 802 break; 803 } 804 } 805 device_printf(dev, "bus speed %dMHz\n", sc->re_bus_speed); 806 807 /* Enable hardware checksum if available. 
*/ 808 sc->re_tx_cstag = 1; 809 sc->re_rx_cstag = 1; 810 811 ifp = &sc->arpcom.ac_if; 812 ifp->if_softc = sc; 813 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 814 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 815 ifp->if_ioctl = re_ioctl; 816 ifp->if_start = re_start; 817 #ifdef IFPOLL_ENABLE 818 ifp->if_npoll = re_npoll; 819 #endif 820 ifp->if_watchdog = re_watchdog; 821 ifp->if_init = re_init; 822 if (!re_is_faste(sc)) 823 ifp->if_baudrate = 1000000000; 824 else 825 ifp->if_baudrate = 100000000; 826 ifp->if_nmbclusters = sc->re_rx_desc_cnt; 827 ifq_set_maxlen(&ifp->if_snd, qlen); 828 ifq_set_ready(&ifp->if_snd); 829 830 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | 831 IFCAP_RXCSUM | IFCAP_TXCSUM; 832 ifp->if_capenable = ifp->if_capabilities; 833 /* NOTE: if_hwassist will be setup after the interface is up. */ 834 835 /* 836 * Call MI attach routine. 837 */ 838 ether_ifattach(ifp, eaddr, NULL); 839 840 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->re_irq)); 841 842 rtl_phy_power_up(sc); 843 rtl_hw_phy_config(sc); 844 rtl_clrwol(sc); 845 846 /* TODO: jumbo frame */ 847 CSR_WRITE_2(sc, RE_RxMaxSize, sc->re_rxbuf_size); 848 849 #ifdef IFPOLL_ENABLE 850 ifpoll_compat_setup(&sc->re_npoll, ctx, (struct sysctl_oid *)tree, 851 device_get_unit(dev), ifp->if_serializer); 852 #endif 853 854 /* Hook interrupt last to avoid having to lock softc */ 855 error = bus_setup_intr(dev, sc->re_irq, INTR_MPSAFE | INTR_HIFREQ, 856 re_intr, sc, &sc->re_intrhand, ifp->if_serializer); 857 if (error) { 858 device_printf(dev, "couldn't set up irq\n"); 859 ether_ifdetach(ifp); 860 goto fail; 861 } 862 863 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 864 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); 865 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 866 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); 867 if (!re_is_faste(sc)) { 868 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | 
IFM_FDX, 869 0, NULL); 870 } 871 if (re_is_2500e(sc)) { 872 #ifndef IFM_2500_T 873 ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_SX | IFM_FDX, 874 0, NULL); 875 #else 876 ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T | IFM_FDX, 877 0, NULL); 878 #endif 879 } 880 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 881 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 882 rtl_ifmedia_upd(ifp); 883 884 fail: 885 if (error) 886 re_detach(dev); 887 888 return (error); 889 } 890 891 /* 892 * Shutdown hardware and free up resources. This can be called any 893 * time after the mutex has been initialized. It is called in both 894 * the error case in attach and the normal detach case so it needs 895 * to be careful about only freeing resources that have actually been 896 * allocated. 897 */ 898 static int 899 re_detach(device_t dev) 900 { 901 struct re_softc *sc = device_get_softc(dev); 902 struct ifnet *ifp = &sc->arpcom.ac_if; 903 904 /* These should only be active if attach succeeded */ 905 if (device_is_attached(dev)) { 906 lwkt_serialize_enter(ifp->if_serializer); 907 re_stop(sc, TRUE); 908 bus_teardown_intr(dev, sc->re_irq, sc->re_intrhand); 909 lwkt_serialize_exit(ifp->if_serializer); 910 911 ether_ifdetach(ifp); 912 } 913 ifmedia_removeall(&sc->media); 914 915 if (sc->re_irq) 916 bus_release_resource(dev, SYS_RES_IRQ, sc->re_irq_rid, 917 sc->re_irq); 918 919 if (sc->re_irq_type == PCI_INTR_TYPE_MSI) 920 pci_release_msi(dev); 921 922 if (sc->re_res) { 923 bus_release_resource(dev, sc->re_res_type, sc->re_res_rid, 924 sc->re_res); 925 } 926 rtl_cmac_unmap(sc); 927 928 /* Free DMA stuffs */ 929 re_freemem(dev); 930 931 return(0); 932 } 933 934 static void 935 re_setup_rxdesc(struct re_softc *sc, int idx) 936 { 937 bus_addr_t paddr; 938 uint32_t cmdstat; 939 struct re_desc *d; 940 941 paddr = sc->re_ldata.re_rx_paddr[idx]; 942 d = &sc->re_ldata.re_rx_list[idx]; 943 944 d->re_bufaddr_lo = htole32(RE_ADDR_LO(paddr)); 945 d->re_bufaddr_hi = htole32(RE_ADDR_HI(paddr)); 946 
947 cmdstat = sc->re_rxbuf_size | RE_RDESC_CMD_OWN; 948 if (idx == (sc->re_rx_desc_cnt - 1)) 949 cmdstat |= RE_RDESC_CMD_EOR; 950 d->re_cmdstat = htole32(cmdstat); 951 } 952 953 static int 954 re_newbuf_std(struct re_softc *sc, int idx, int init) 955 { 956 bus_dma_segment_t seg; 957 bus_dmamap_t map; 958 struct mbuf *m; 959 int error, nsegs; 960 961 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 962 if (m == NULL) { 963 error = ENOBUFS; 964 965 if (init) { 966 if_printf(&sc->arpcom.ac_if, "m_getcl failed\n"); 967 return error; 968 } else { 969 goto back; 970 } 971 } 972 m->m_len = m->m_pkthdr.len = MCLBYTES; 973 974 /* 975 * NOTE: 976 * re(4) chips need address of the receive buffer to be 8-byte 977 * aligned, so don't call m_adj(m, ETHER_ALIGN) here. 978 */ 979 980 error = bus_dmamap_load_mbuf_segment(sc->re_ldata.re_rx_mtag, 981 sc->re_ldata.re_rx_spare, m, 982 &seg, 1, &nsegs, BUS_DMA_NOWAIT); 983 if (error) { 984 m_freem(m); 985 if (init) { 986 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n"); 987 return error; 988 } else { 989 goto back; 990 } 991 } 992 993 if (!init) { 994 bus_dmamap_sync(sc->re_ldata.re_rx_mtag, 995 sc->re_ldata.re_rx_dmamap[idx], 996 BUS_DMASYNC_POSTREAD); 997 bus_dmamap_unload(sc->re_ldata.re_rx_mtag, 998 sc->re_ldata.re_rx_dmamap[idx]); 999 } 1000 sc->re_ldata.re_rx_mbuf[idx] = m; 1001 sc->re_ldata.re_rx_paddr[idx] = seg.ds_addr; 1002 1003 map = sc->re_ldata.re_rx_dmamap[idx]; 1004 sc->re_ldata.re_rx_dmamap[idx] = sc->re_ldata.re_rx_spare; 1005 sc->re_ldata.re_rx_spare = map; 1006 back: 1007 re_setup_rxdesc(sc, idx); 1008 return error; 1009 } 1010 1011 #ifdef RE_JUMBO 1012 static int 1013 re_newbuf_jumbo(struct re_softc *sc, int idx, int init) 1014 { 1015 struct mbuf *m; 1016 struct re_jbuf *jbuf; 1017 int error = 0; 1018 1019 MGETHDR(m, init ? 
M_WAITOK : M_NOWAIT, MT_DATA); 1020 if (m == NULL) { 1021 error = ENOBUFS; 1022 if (init) { 1023 if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n"); 1024 return error; 1025 } else { 1026 goto back; 1027 } 1028 } 1029 1030 jbuf = re_jbuf_alloc(sc); 1031 if (jbuf == NULL) { 1032 m_freem(m); 1033 1034 error = ENOBUFS; 1035 if (init) { 1036 if_printf(&sc->arpcom.ac_if, "jpool is empty\n"); 1037 return error; 1038 } else { 1039 goto back; 1040 } 1041 } 1042 1043 m->m_ext.ext_arg = jbuf; 1044 m->m_ext.ext_buf = jbuf->re_buf; 1045 m->m_ext.ext_free = re_jbuf_free; 1046 m->m_ext.ext_ref = re_jbuf_ref; 1047 m->m_ext.ext_size = sc->re_rxbuf_size; 1048 1049 m->m_data = m->m_ext.ext_buf; 1050 m->m_flags |= M_EXT; 1051 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1052 1053 /* 1054 * NOTE: 1055 * Some re(4) chips(e.g. RTL8101E) need address of the receive buffer 1056 * to be 8-byte aligned, so don't call m_adj(m, ETHER_ALIGN) here. 1057 */ 1058 1059 sc->re_ldata.re_rx_mbuf[idx] = m; 1060 sc->re_ldata.re_rx_paddr[idx] = jbuf->re_paddr; 1061 back: 1062 re_setup_rxdesc(sc, idx); 1063 return error; 1064 } 1065 #endif /* RE_JUMBO */ 1066 1067 static int 1068 re_tx_list_init(struct re_softc *sc) 1069 { 1070 bzero(sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc)); 1071 1072 sc->re_ldata.re_tx_prodidx = 0; 1073 sc->re_ldata.re_tx_considx = 0; 1074 sc->re_ldata.re_tx_free = sc->re_tx_desc_cnt; 1075 1076 return(0); 1077 } 1078 1079 static int 1080 re_rx_list_init(struct re_softc *sc) 1081 { 1082 int i, error; 1083 1084 bzero(sc->re_ldata.re_rx_list, RE_RX_LIST_SZ(sc)); 1085 1086 for (i = 0; i < sc->re_rx_desc_cnt; i++) { 1087 error = sc->re_newbuf(sc, i, 1); 1088 if (error) 1089 return(error); 1090 } 1091 1092 sc->re_ldata.re_rx_prodidx = 0; 1093 sc->re_head = sc->re_tail = NULL; 1094 1095 return(0); 1096 } 1097 1098 #define RE_IP4_PACKET 0x1 1099 #define RE_TCP_PACKET 0x2 1100 #define RE_UDP_PACKET 0x4 1101 1102 static __inline uint8_t 1103 re_packet_type(struct re_softc *sc, uint32_t rxstat, 
uint32_t rxctrl) 1104 { 1105 uint8_t packet_type = 0; 1106 1107 if (sc->re_if_flags & RL_FLAG_DESCV2) { 1108 if (rxctrl & RE_RDESC_CTL_PROTOIP4) 1109 packet_type |= RE_IP4_PACKET; 1110 } else { 1111 if (rxstat & RE_RDESC_STAT_PROTOID) 1112 packet_type |= RE_IP4_PACKET; 1113 } 1114 if (RE_TCPPKT(rxstat)) 1115 packet_type |= RE_TCP_PACKET; 1116 else if (RE_UDPPKT(rxstat)) 1117 packet_type |= RE_UDP_PACKET; 1118 return packet_type; 1119 } 1120 1121 /* 1122 * RX handler for C+ and 8169. For the gigE chips, we support 1123 * the reception of jumbo frames that have been fragmented 1124 * across multiple 2K mbuf cluster buffers. 1125 */ 1126 static int 1127 re_rxeof(struct re_softc *sc) 1128 { 1129 struct ifnet *ifp = &sc->arpcom.ac_if; 1130 struct mbuf *m; 1131 struct re_desc *cur_rx; 1132 uint32_t rxstat, rxctrl; 1133 int i, total_len, rx = 0; 1134 1135 for (i = sc->re_ldata.re_rx_prodidx; 1136 RE_OWN(&sc->re_ldata.re_rx_list[i]) == 0; RE_RXDESC_INC(sc, i)) { 1137 cur_rx = &sc->re_ldata.re_rx_list[i]; 1138 m = sc->re_ldata.re_rx_mbuf[i]; 1139 total_len = RE_RXBYTES(cur_rx); 1140 rxstat = le32toh(cur_rx->re_cmdstat); 1141 rxctrl = le32toh(cur_rx->re_control); 1142 1143 rx = 1; 1144 1145 #ifdef INVARIANTS 1146 if (sc->re_flags & RE_F_USE_JPOOL) 1147 KKASSERT(rxstat & RE_RDESC_STAT_EOF); 1148 #endif 1149 1150 if ((rxstat & RE_RDESC_STAT_EOF) == 0) { 1151 if (sc->re_flags & RE_F_DROP_RXFRAG) { 1152 re_setup_rxdesc(sc, i); 1153 continue; 1154 } 1155 1156 if (sc->re_newbuf(sc, i, 0)) { 1157 /* Drop upcoming fragments */ 1158 sc->re_flags |= RE_F_DROP_RXFRAG; 1159 continue; 1160 } 1161 1162 m->m_len = MCLBYTES; 1163 if (sc->re_head == NULL) { 1164 sc->re_head = sc->re_tail = m; 1165 } else { 1166 sc->re_tail->m_next = m; 1167 sc->re_tail = m; 1168 } 1169 continue; 1170 } else if (sc->re_flags & RE_F_DROP_RXFRAG) { 1171 /* 1172 * Last fragment of a multi-fragment packet. 
1173 * 1174 * Since error already happened, this fragment 1175 * must be dropped as well as the fragment chain. 1176 */ 1177 re_setup_rxdesc(sc, i); 1178 re_free_rxchain(sc); 1179 sc->re_flags &= ~RE_F_DROP_RXFRAG; 1180 continue; 1181 } 1182 1183 rxstat >>= 1; 1184 if (rxstat & RE_RDESC_STAT_RXERRSUM) { 1185 IFNET_STAT_INC(ifp, ierrors, 1); 1186 /* 1187 * If this is part of a multi-fragment packet, 1188 * discard all the pieces. 1189 */ 1190 re_free_rxchain(sc); 1191 re_setup_rxdesc(sc, i); 1192 continue; 1193 } 1194 1195 /* 1196 * If allocating a replacement mbuf fails, 1197 * reload the current one. 1198 */ 1199 1200 if (sc->re_newbuf(sc, i, 0)) { 1201 IFNET_STAT_INC(ifp, ierrors, 1); 1202 continue; 1203 } 1204 1205 if (sc->re_head != NULL) { 1206 m->m_len = total_len % MCLBYTES; 1207 /* 1208 * Special case: if there's 4 bytes or less 1209 * in this buffer, the mbuf can be discarded: 1210 * the last 4 bytes is the CRC, which we don't 1211 * care about anyway. 1212 */ 1213 if (m->m_len <= ETHER_CRC_LEN) { 1214 sc->re_tail->m_len -= 1215 (ETHER_CRC_LEN - m->m_len); 1216 m_freem(m); 1217 } else { 1218 m->m_len -= ETHER_CRC_LEN; 1219 sc->re_tail->m_next = m; 1220 } 1221 m = sc->re_head; 1222 sc->re_head = sc->re_tail = NULL; 1223 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1224 } else { 1225 m->m_pkthdr.len = m->m_len = 1226 (total_len - ETHER_CRC_LEN); 1227 } 1228 1229 IFNET_STAT_INC(ifp, ipackets, 1); 1230 m->m_pkthdr.rcvif = ifp; 1231 1232 /* Do RX checksumming if enabled */ 1233 1234 if (ifp->if_capenable & IFCAP_RXCSUM) { 1235 uint8_t packet_type; 1236 1237 packet_type = re_packet_type(sc, rxstat, rxctrl); 1238 1239 /* Check IP header checksum */ 1240 if (packet_type & RE_IP4_PACKET) { 1241 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1242 if ((rxstat & RE_RDESC_STAT_IPSUMBAD) == 0) 1243 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1244 } 1245 1246 /* Check TCP/UDP checksum */ 1247 if (((packet_type & RE_TCP_PACKET) && 1248 (rxstat & RE_RDESC_STAT_TCPSUMBAD) == 0) || 
			    ((packet_type & RE_UDP_PACKET) &&
			     (rxstat & RE_RDESC_STAT_UDPSUMBAD) == 0)) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
				    CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Extract the hardware-stripped VLAN tag, if present. */
		if (rxctrl & RE_RDESC_CTL_HASTAG) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vlantag =
			    be16toh((rxctrl & RE_RDESC_CTL_TAGDATA));
		}
		ifp->if_input(ifp, m, NULL, -1);
	}

	sc->re_ldata.re_rx_prodidx = i;

	return rx;
}

#undef RE_IP4_PACKET
#undef RE_TCP_PACKET
#undef RE_UDP_PACKET

/*
 * Walk the TX ring from the consumer index, reclaiming descriptors the
 * chip has finished with (OWN bit clear): unload DMA maps, free mbufs,
 * and update the collision/error/packet statistics.
 *
 * Returns non-zero if at least one descriptor was reclaimed.
 */
static int
re_tx_collect(struct re_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t txstat;
	int idx, tx = 0;

	for (idx = sc->re_ldata.re_tx_considx;
	     sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt;
	     RE_TXDESC_INC(sc, idx)) {
		txstat = le32toh(sc->re_ldata.re_tx_list[idx].re_cmdstat);
		if (txstat & RE_TDESC_CMD_OWN)
			break;

		tx = 1;

		sc->re_ldata.re_tx_list[idx].re_bufaddr_lo = 0;

		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 *
		 * NOTE:
		 * On 8125, RE_TDESC_CMD_EOF is no longer left
		 * uncleared.
		 */
		if (sc->re_ldata.re_tx_mbuf[idx] != NULL) {
			bus_dmamap_unload(sc->re_ldata.re_tx_mtag,
			    sc->re_ldata.re_tx_dmamap[idx]);
			m_freem(sc->re_ldata.re_tx_mbuf[idx]);
			sc->re_ldata.re_tx_mbuf[idx] = NULL;
			if (txstat & (RE_TDESC_STAT_EXCESSCOL|
			    RE_TDESC_STAT_COLCNT))
				IFNET_STAT_INC(ifp, collisions, 1);
			if (txstat & RE_TDESC_STAT_TXERRSUM)
				IFNET_STAT_INC(ifp, oerrors, 1);
			else
				IFNET_STAT_INC(ifp, opackets, 1);
		}
		sc->re_ldata.re_tx_free++;
	}
	sc->re_ldata.re_tx_considx = idx;

	return tx;
}

/*
 * TX completion handler: reclaim finished descriptors and clear the
 * output-active flag once enough descriptors are free again.
 */
static int
re_txeof(struct re_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int tx;

	tx = re_tx_collect(sc);

	/* There is enough free TX descs */
	if (sc->re_ldata.re_tx_free > RE_TXDESC_SPARE)
		ifq_clr_oactive(&ifp->if_snd);

	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only seems
	 * to be required with the PCIe devices.
	 */
	if (sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt)
		sc->re_start_xmit(sc);
	else
		ifp->if_timer = 0;

	return tx;
}

/*
 * Periodic callout entry point; takes the serializer and defers to
 * re_tick_serialized().
 */
static void
re_tick(void *xsc)
{
	struct re_softc *sc = xsc;

	lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
	re_tick_serialized(xsc);
	lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
}

/*
 * Once-per-second link watcher: track PHY link transitions and
 * reschedule itself while the interface is running.
 */
static void
re_tick_serialized(void *xsc)
{
	struct re_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	if (rtl_link_ok(sc)) {
		if ((sc->re_flags & RE_F_LINKED) == 0)
			re_link_up(sc);
	} else if (sc->re_flags & RE_F_LINKED) {
		re_link_down(sc);
	}
	callout_reset(&sc->re_timer, hz, re_tick, sc);
}

#ifdef IFPOLL_ENABLE

/*
 * Polling-mode RX/TX processing; the status check runs only every
 * ifpc_stfrac polls to keep register access cheap.
 */
static void
re_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
{
	struct re_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc->re_npoll.ifpc_stcount-- == 0) {
		uint32_t status;

		sc->re_npoll.ifpc_stcount = sc->re_npoll.ifpc_stfrac;

		status = sc->re_read_isr(sc);
		if (status)
			sc->re_write_isr(sc, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & RE_ISR_SYSTEM_ERR) {
			rtl_reset(sc);
			re_init(sc);
			/* Done!
 */
			return;
		}
	}

	sc->rxcycles = count;
	re_rxeof(sc);
	re_txeof(sc);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Register/deregister the polling handler.  When polling starts,
 * hardware interrupts are disabled; when it stops they are restored
 * with the configured moderation type.
 */
static void
re_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct re_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (info != NULL) {
		int cpuid = sc->re_npoll.ifpc_cpuid;

		info->ifpi_rx[cpuid].poll_func = re_npoll_compat;
		info->ifpi_rx[cpuid].arg = NULL;
		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;

		if (ifp->if_flags & IFF_RUNNING)
			re_setup_intr(sc, 0, RE_IMTYPE_NONE);
		ifq_set_cpuid(&ifp->if_snd, cpuid);
	} else {
		if (ifp->if_flags & IFF_RUNNING)
			re_setup_intr(sc, 1, sc->re_imtype);
		ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->re_irq));
	}
}
#endif	/* IFPOLL_ENABLE */

/*
 * Interrupt handler.  Interrupts are masked for the duration; the
 * tail of the function decides how (and with which moderation mode)
 * they are re-enabled.
 */
static void
re_intr(void *arg)
{
	struct re_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int proc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->re_flags & RE_F_SUSPENDED) ||
	    (ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/* Disable interrupts. */
	sc->re_write_imr(sc, 0);

	status = sc->re_read_isr(sc);
again:
	proc = 0;
	if (status)
		sc->re_write_isr(sc, status);
	if (status & sc->re_intrs) {
		if (status & RE_ISR_SYSTEM_ERR) {
			rtl_reset(sc);
			re_init(sc);
			/* Done!  re_init() re-enables interrupts. */
			return;
		}
		proc |= re_rxeof(sc);
		proc |= re_txeof(sc);
	}

	if (sc->re_imtype == RE_IMTYPE_SIM) {
		if ((sc->re_flags & RE_F_TIMER_INTR)) {
			if (!proc) {
				/*
				 * Nothing needs to be processed, fallback
				 * to use TX/RX interrupts.
				 *
				 * NOTE: This will re-enable interrupts.
				 */
				re_setup_intr(sc, 1, RE_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				re_txeof(sc);
			} else {
				/* Re-enable interrupts. */
				sc->re_write_imr(sc, sc->re_intrs);
				CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */
			}
		} else if (proc) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 *
			 * NOTE: This will re-enable interrupts.
			 */
			re_setup_intr(sc, 1, RE_IMTYPE_SIM);
		} else {
			/* Re-enable interrupts. */
			sc->re_write_imr(sc, sc->re_intrs);
		}
	} else {
		status = sc->re_read_isr(sc);
		if (status & sc->re_intrs) {
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			/* NOTE: Interrupts are still disabled. */
			goto again;
		}
		/* Re-enable interrupts. */
		sc->re_write_imr(sc, sc->re_intrs);
	}

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Map one outbound mbuf chain onto TX descriptors starting at *idx0.
 * On success *idx0 is advanced past the chain; on failure *m_head is
 * freed and set to NULL.  Caller guarantees > RE_TXDESC_SPARE free
 * descriptors (asserted below).
 */
static int
re_encap(struct re_softc *sc, struct mbuf **m_head, int *idx0)
{
	struct mbuf *m = *m_head;
	bus_dma_segment_t segs[RE_MAXSEGS];
	bus_dmamap_t map;
	int error, maxsegs, idx, i, nsegs;
	struct re_desc *d, *tx_ring;
	uint32_t cmd_csum, ctl_csum, vlantag;

	KASSERT(sc->re_ldata.re_tx_free > RE_TXDESC_SPARE,
	    ("not enough free TX desc"));

	/* Some chips require a single contiguous TX buffer; defrag if so. */
	if (sc->re_coalesce_tx_pkt && m->m_pkthdr.len != m->m_len) {
		struct mbuf *m_new;

		m_new = m_defrag(m, M_NOWAIT);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto back;
		} else {
			*m_head = m = m_new;
			if (m->m_pkthdr.len != m->m_len) {
				/* Still not contiguous; give up. */
				error = ENOBUFS;
				goto back;
			}
		}
	}

	map = sc->re_ldata.re_tx_dmamap[*idx0];

	/*
	 * Set up checksum offload.
 * Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. (This is according to testing done with an 8169
	 * chip. I'm not sure if this is a requirement or a bug.)
	 */
	cmd_csum = ctl_csum = 0;
	if (m->m_pkthdr.csum_flags & CSUM_IP) {
		cmd_csum |= RE_TDESC_CMD_IPCSUM;
		ctl_csum |= RE_TDESC_CTL_IPCSUM;
	}
	if (m->m_pkthdr.csum_flags & CSUM_TCP) {
		cmd_csum |= RE_TDESC_CMD_TCPCSUM;
		ctl_csum |= RE_TDESC_CTL_TCPCSUM;
	}
	if (m->m_pkthdr.csum_flags & CSUM_UDP) {
		cmd_csum |= RE_TDESC_CMD_UDPCSUM;
		ctl_csum |= RE_TDESC_CTL_UDPCSUM;
	}

	/* For version2 descriptor, csum flags are set on re_control */
	if (sc->re_if_flags & RL_FLAG_DESCV2)
		cmd_csum = 0;
	else
		ctl_csum = 0;

	if (sc->re_pad_runt) {
		/*
		 * With some of the RealTek chips, using the checksum offload
		 * support in conjunction with the autopadding feature results
		 * in the transmission of corrupt frames. For example, if we
		 * need to send a really small IP fragment that's less than 60
		 * bytes in size, and IP header checksumming is enabled, the
		 * resulting ethernet frame that appears on the wire will
		 * have garbled payload. To work around this, if TX checksum
		 * offload is enabled, we always manually pad short frames out
		 * to the minimum ethernet frame size.
		 *
		 * Note: this appears unnecessary for TCP, and doing it for TCP
		 * with PCIe adapters seems to result in bad checksums.
		 */
		if ((m->m_pkthdr.csum_flags &
		     (CSUM_DELAY_IP | CSUM_DELAY_DATA)) &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) == 0 &&
		    m->m_pkthdr.len < RE_MIN_FRAMELEN) {
			error = m_devpad(m, RE_MIN_FRAMELEN);
			if (error)
				goto back;
		}
	}

	/* VLAN tag insertion request, if the mbuf carries a tag. */
	vlantag = 0;
	if (m->m_flags & M_VLANTAG) {
		vlantag = htobe16(m->m_pkthdr.ether_vlantag) |
		    RE_TDESC_CTL_INSTAG;
	}

	maxsegs = sc->re_ldata.re_tx_free;
	if (maxsegs > RE_MAXSEGS)
		maxsegs = RE_MAXSEGS;

	error = bus_dmamap_load_mbuf_defrag(sc->re_ldata.re_tx_mtag, map,
	    m_head, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;

	m = *m_head;
	bus_dmamap_sync(sc->re_ldata.re_tx_mtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Map the segment array into descriptors. We also keep track
	 * of the end of the ring and set the end-of-ring bits as needed,
	 * and we set the ownership bits in all except the very first
	 * descriptor, whose ownership bits will be turned on later.
	 */
	tx_ring = sc->re_ldata.re_tx_list;
	idx = *idx0;
	i = 0;
	for (;;) {
		uint32_t cmdstat;

		d = &tx_ring[idx];

		KKASSERT(sc->re_ldata.re_tx_mbuf[idx] == NULL);

		d->re_bufaddr_lo = htole32(RE_ADDR_LO(segs[i].ds_addr));
		d->re_bufaddr_hi = htole32(RE_ADDR_HI(segs[i].ds_addr));

		cmdstat = segs[i].ds_len;
		if (i == 0) {
			cmdstat |= RE_TDESC_CMD_SOF;
		} else if (i != nsegs - 1) {
			/*
			 * Last descriptor's ownership will be transfered
			 * later.
			 */
			cmdstat |= RE_TDESC_CMD_OWN;
		}
		if (idx == (sc->re_tx_desc_cnt - 1))
			cmdstat |= RE_TDESC_CMD_EOR;

		d->re_control = htole32(ctl_csum | vlantag);
		d->re_cmdstat = htole32(cmdstat | cmd_csum);

		i++;
		if (i == nsegs)
			break;
		RE_TXDESC_INC(sc, idx);
	}
	d->re_cmdstat |= htole32(RE_TDESC_CMD_EOF);

	/* Transfer ownership of packet to the chip. */
	d->re_cmdstat |= htole32(RE_TDESC_CMD_OWN);
	if (*idx0 != idx)
		tx_ring[*idx0].re_cmdstat |= htole32(RE_TDESC_CMD_OWN);

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->re_ldata.re_tx_dmamap[*idx0] = sc->re_ldata.re_tx_dmamap[idx];
	sc->re_ldata.re_tx_dmamap[idx] = map;

	sc->re_ldata.re_tx_mbuf[idx] = m;
	sc->re_ldata.re_tx_free -= nsegs;

	RE_TXDESC_INC(sc, idx);
	*idx0 = idx;
back:
	if (error) {
		/* re_encap() owns the mbuf on failure; free it here. */
		m_freem(*m_head);
		*m_head = NULL;
	}
	return error;
}

/*
 * Main transmit routine for C+ and gigE NICs.
 */

static void
re_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct re_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int idx, need_trans, oactive, error;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Without link, queued packets can never be sent; drop them. */
	if ((sc->re_flags & RE_F_LINKED) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	idx = sc->re_ldata.re_tx_prodidx;

	need_trans = 0;
	oactive = 0;
	for (;;) {
		if (sc->re_ldata.re_tx_free <= RE_TXDESC_SPARE) {
			/* Try one reclaim pass before declaring oactive. */
			if (!oactive) {
				if (re_tx_collect(sc)) {
					oactive = 1;
					continue;
				}
			}
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		error = re_encap(sc, &m_head, &idx);
		if (error) {
			/* m_head is freed by re_encap(), if we reach here */
			IFNET_STAT_INC(ifp, oerrors, 1);

			if (error == EFBIG && !oactive) {
				if (re_tx_collect(sc)) {
					oactive = 1;
					continue;
				}
			}
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		oactive = 0;
		need_trans = 1;

		/*
		 * If
 * there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (!need_trans)
		return;

	sc->re_ldata.re_tx_prodidx = idx;

	/*
	 * RealTek put the TX poll request register in a different
	 * location on the 8169 gigE chip. I don't know why.
	 */
	sc->re_start_xmit(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

/*
 * Bring the interface up after the PHY reports link: reprogram the
 * MAC, rebuild both descriptor rings, restart the hardware and
 * re-enable interrupts (unless polling).
 */
static void
re_link_up(struct re_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	rtl_link_on_patch(sc);
	re_stop(sc, FALSE);
	rtl_set_eaddr(sc);

	error = re_rx_list_init(sc);
	if (error) {
		re_stop(sc, TRUE);
		return;
	}
	error = re_tx_list_init(sc);
	if (error) {
		re_stop(sc, TRUE);
		return;
	}

	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */
	CSR_WRITE_4(sc, RE_RXLIST_ADDR_HI,
	    RE_ADDR_HI(sc->re_ldata.re_rx_list_addr));
	CSR_WRITE_4(sc, RE_RXLIST_ADDR_LO,
	    RE_ADDR_LO(sc->re_ldata.re_rx_list_addr));

	CSR_WRITE_4(sc, RE_TXLIST_ADDR_HI,
	    RE_ADDR_HI(sc->re_ldata.re_tx_list_addr));
	CSR_WRITE_4(sc, RE_TXLIST_ADDR_LO,
	    RE_ADDR_LO(sc->re_ldata.re_tx_list_addr));

	rtl_hw_start(sc);

#ifdef IFPOLL_ENABLE
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_NPOLLING)
		re_setup_intr(sc, 0, RE_IMTYPE_NONE);
	else	/* otherwise ... */
#endif	/* IFPOLL_ENABLE */
	/*
	 * Enable interrupts.
	 */
	re_setup_intr(sc, 1, sc->re_imtype);
	sc->re_write_isr(sc, sc->re_intrs);

	sc->re_flags |= RE_F_LINKED;
	ifp->if_link_state = LINK_STATE_UP;
	if_link_state_change(ifp);

	if (bootverbose)
		if_printf(ifp, "link UP\n");

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * React to loss of link: report it, quiesce the chip and restart
 * media negotiation.
 */
static void
re_link_down(struct re_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* NOTE: re_stop() will reset RE_F_LINKED. */
	ifp->if_link_state = LINK_STATE_DOWN;
	if_link_state_change(ifp);

	re_stop(sc, FALSE);
	rtl_ifmedia_upd(ifp);

	if (bootverbose)
		if_printf(ifp, "link DOWN\n");
}

/*
 * if_init handler: full stop, then bring the link up immediately if
 * the PHY already reports link; otherwise re_tick() will do it later.
 */
static void
re_init(void *xsc)
{
	struct re_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	re_stop(sc, TRUE);
	if (rtl_link_ok(sc)) {
		if (bootverbose)
			if_printf(ifp, "link is UP in if_init\n");
		re_link_up(sc);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	callout_reset(&sc->re_timer, hz, re_tick, sc);
}

/*
 * ifnet ioctl handler; runs under the interface serializer.
 */
static int
re_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct re_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch(command) {
	case SIOCSIFMTU:
#ifdef RE_JUMBO
		if (ifr->ifr_mtu > sc->re_maxmtu) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				ifp->if_init(sc);
		}
#else
		error = EOPNOTSUPP;
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only reprogram the RX filter if the
				 * relevant flags actually changed. */
				if ((ifp->if_flags ^ sc->re_saved_ifflags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					rtl_set_rx_packet_filter(sc);
			} else {
				re_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			re_stop(sc, TRUE);
		}
		sc->re_saved_ifflags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rtl_set_rx_packet_filter(sc);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) &
		       ifp->if_capabilities;
		ifp->if_capenable ^= mask;

		/* NOTE: re_init will setup if_hwassist. */
		ifp->if_hwassist = 0;

		/* Setup flags for the backend. */
		if (ifp->if_capenable & IFCAP_RXCSUM)
			sc->re_rx_cstag = 1;
		else
			sc->re_rx_cstag = 0;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			sc->re_tx_cstag = 1;
		else
			sc->re_tx_cstag = 0;

		if (mask && (ifp->if_flags & IFF_RUNNING))
			re_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return(error);
}

/*
 * TX watchdog: attempt a final reclaim pass; if descriptors are still
 * outstanding, reset and reinitialize the chip.
 */
static void
re_watchdog(struct ifnet *ifp)
{
	struct re_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	IFNET_STAT_INC(ifp, oerrors, 1);

	re_txeof(sc);
	re_rxeof(sc);

	if (sc->re_ldata.re_tx_free != sc->re_tx_desc_cnt) {
		if_printf(ifp, "watchdog timeout, txd free %d\n",
		    sc->re_ldata.re_tx_free);
		rtl_reset(sc);
		re_init(sc);
	}
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
re_stop(struct re_softc *sc, boolean_t full_stop)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Stop the adapter.
 */
	rtl_stop(sc);

	ifp->if_timer = 0;
	if (full_stop) {
		/* Full stop also cancels the tick callout and marks
		 * the interface down. */
		callout_stop(&sc->re_timer);
		ifp->if_flags &= ~IFF_RUNNING;
	}
	ifq_clr_oactive(&ifp->if_snd);
	sc->re_flags &= ~(RE_F_TIMER_INTR | RE_F_DROP_RXFRAG | RE_F_LINKED);

	re_free_rxchain(sc);

	/* Free the TX list buffers. */
	for (i = 0; i < sc->re_tx_desc_cnt; i++) {
		if (sc->re_ldata.re_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->re_ldata.re_tx_mtag,
			    sc->re_ldata.re_tx_dmamap[i]);
			m_freem(sc->re_ldata.re_tx_mbuf[i]);
			sc->re_ldata.re_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < sc->re_rx_desc_cnt; i++) {
		if (sc->re_ldata.re_rx_mbuf[i] != NULL) {
			/* Jumbo-pool buffers have no per-slot DMA map. */
			if ((sc->re_flags & RE_F_USE_JPOOL) == 0) {
				bus_dmamap_unload(sc->re_ldata.re_rx_mtag,
				    sc->re_ldata.re_rx_dmamap[i]);
			}
			m_freem(sc->re_ldata.re_rx_mbuf[i]);
			sc->re_ldata.re_rx_mbuf[i] = NULL;
		}
	}
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
re_suspend(device_t dev)
{
#ifndef BURN_BRIDGES
	int i;
#endif
	struct re_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	re_stop(sc, TRUE);

#ifndef BURN_BRIDGES
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
#endif

	sc->re_flags |= RE_F_SUSPENDED;

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
re_resume(device_t dev)
{
	struct re_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifndef BURN_BRIDGES
	int i;
#endif

	lwkt_serialize_enter(ifp->if_serializer);

#ifndef BURN_BRIDGES
	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_IOPORT);
#endif

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		re_init(sc);

	sc->re_flags &= ~RE_F_SUSPENDED;

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
re_shutdown(device_t dev)
{
	struct re_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	re_stop(sc, TRUE);
	rtl_hw_d3_para(sc);
	rtl_phy_power_down(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

/* sysctl wrapper: RX hardware-moderation time. */
static int
re_sysctl_rxtime(SYSCTL_HANDLER_ARGS)
{
	struct re_softc *sc = arg1;

	return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_rx_time);
}

/* sysctl wrapper: TX hardware-moderation time. */
static int
re_sysctl_txtime(SYSCTL_HANDLER_ARGS)
{
	struct re_softc *sc = arg1;

	return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_tx_time);
}

/*
 * Shared handler for the rx/tx hardware-moderation sysctls; validates
 * the new value (> 0) and reprograms the chip if HW moderation is the
 * active mode on a running, non-polling interface.
 */
static int
re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *hwtime)
{
	struct re_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *hwtime;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (v !=
    *hwtime) {
		*hwtime = v;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    IFF_RUNNING && sc->re_imtype == RE_IMTYPE_HW)
			re_setup_hw_im(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * sysctl handler for the simulated (timer-based) interrupt moderation
 * period; restarts the moderation timer when it changes.
 */
static int
re_sysctl_simtime(SYSCTL_HANDLER_ARGS)
{
	struct re_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->re_sim_time;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->re_sim_time) {
		sc->re_sim_time = v;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    IFF_RUNNING && sc->re_imtype == RE_IMTYPE_SIM) {
#ifdef foo
			/*
			 * Following code causes various strange
			 * performance problems. Hmm ...
			 */
			sc->re_write_imr(sc, 0);
			CSR_WRITE_4(sc, RE_TIMERINT, 0);
			CSR_READ_4(sc, RE_TIMERINT); /* flush */

			sc->re_write_imr(sc, sc->re_intrs);
			re_setup_sim_im(sc);
#else
			re_setup_intr(sc, 0, RE_IMTYPE_NONE);
			DELAY(10);
			re_setup_intr(sc, 1, RE_IMTYPE_SIM);
#endif
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * sysctl handler selecting the interrupt moderation type
 * (RE_IMTYPE_NONE/HW/SIM); HW moderation requires RE_C_HWIM.
 */
static int
re_sysctl_imtype(SYSCTL_HANDLER_ARGS)
{
	struct re_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->re_imtype;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v != RE_IMTYPE_HW && v != RE_IMTYPE_SIM && v != RE_IMTYPE_NONE) {
		error = EINVAL;
		goto back;
	}
	if (v == RE_IMTYPE_HW && (sc->re_caps & RE_C_HWIM) == 0) {
		/* Can't do hardware interrupt moderation */
		error = EOPNOTSUPP;
		goto back;
	}

	if (v != sc->re_imtype) {
		sc->re_imtype = v;
		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    IFF_RUNNING)
			re_setup_intr(sc, 1, sc->re_imtype);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/* Program the hardware interrupt moderation timers (RE_IM register). */
static void
re_setup_hw_im(struct re_softc *sc)
{
	KKASSERT(sc->re_caps & RE_C_HWIM);

	/*
	 * Interrupt moderation
	 *
	 * 0xABCD
	 * A - unknown (maybe TX related)
	 * B - TX timer (unit: 25us)
	 * C - unknown (maybe RX related)
	 * D - RX timer (unit: 25us)
	 *
	 *
	 * re(4)'s interrupt moderation is actually controlled by
	 * two variables, like most other NICs (bge, bce etc.)
	 * o timer
	 * o number of packets [P]
	 *
	 * The logic relationship between these two variables is
	 * similar to other NICs too:
	 * if (timer expire || packets > [P])
	 *	Interrupt is delivered
	 *
	 * Currently we only know how to set 'timer', but not
	 * 'number of packets', which should be ~30, as far as I
	 * tested (sink ~900Kpps, interrupt rate is 30KHz)
	 */
	CSR_WRITE_2(sc, RE_IM,
	    RE_IM_RXTIME(sc->re_rx_time) |
	    RE_IM_TXTIME(sc->re_tx_time) |
	    RE_IM_MAGIC);
}

/* Turn hardware interrupt moderation off, if the chip supports it. */
static void
re_disable_hw_im(struct re_softc *sc)
{
	if (sc->re_caps & RE_C_HWIM)
		CSR_WRITE_2(sc, RE_IM, 0);
}

/*
 * Arm the chip's general-purpose timer to simulate interrupt
 * moderation (RE_F_TIMER_INTR mode).
 */
static void
re_setup_sim_im(struct re_softc *sc)
{
	uint32_t ticks;

	if (sc->re_if_flags & RL_FLAG_PCIE) {
		ticks = sc->re_sim_time * sc->re_bus_speed;
	} else {
		/*
		 * Datasheet says tick decreases at bus speed,
		 * but it seems the clock runs a little bit
		 * faster, so we do some compensation here.
		 */
		ticks = (sc->re_sim_time * sc->re_bus_speed * 8) / 5;
	}
	CSR_WRITE_4(sc, RE_TIMERINT, ticks);

	CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */
	sc->re_flags |= RE_F_TIMER_INTR;
}

/* Disarm the simulated-moderation timer. */
static void
re_disable_sim_im(struct re_softc *sc)
{
	CSR_WRITE_4(sc, RE_TIMERINT, 0);
	sc->re_flags &= ~RE_F_TIMER_INTR;
}

/*
 * Select the interrupt mask and RX/TX ack bits matching the requested
 * moderation type.
 */
static void
re_config_imtype(struct re_softc *sc, int imtype)
{
	switch (imtype) {
	case RE_IMTYPE_HW:
		KKASSERT(sc->re_caps & RE_C_HWIM);
		/* FALL THROUGH */
	case RE_IMTYPE_NONE:
		sc->re_intrs = RE_INTRS;
		sc->re_rx_ack = RE_ISR_RX_OK | RE_ISR_FIFO_OFLOW |
		    RE_ISR_RX_OVERRUN;
		sc->re_tx_ack = RE_ISR_TX_OK;
		break;

	case RE_IMTYPE_SIM:
		sc->re_intrs = RE_INTRS_TIMER;
		sc->re_rx_ack = RE_ISR_PCS_TIMEOUT;
		sc->re_tx_ack = RE_ISR_PCS_TIMEOUT;
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->arpcom.ac_if.if_xname, imtype);
	}
}

/*
 * Apply an interrupt moderation type: program the interrupt mask
 * (enabled or fully masked) and switch the HW/SIM timers accordingly.
 */
static void
re_setup_intr(struct re_softc *sc, int enable_intrs, int imtype)
{
	re_config_imtype(sc, imtype);

	if (enable_intrs)
		sc->re_write_imr(sc, sc->re_intrs);
	else
		sc->re_write_imr(sc, 0);

	sc->re_npoll.ifpc_stcount = 0;

	switch (imtype) {
	case RE_IMTYPE_NONE:
		re_disable_sim_im(sc);
		re_disable_hw_im(sc);
		break;

	case RE_IMTYPE_HW:
		KKASSERT(sc->re_caps & RE_C_HWIM);
		re_disable_sim_im(sc);
		re_setup_hw_im(sc);
		break;

	case RE_IMTYPE_SIM:
		re_disable_hw_im(sc);
		re_setup_sim_im(sc);
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->arpcom.ac_if.if_xname, imtype);
	}
}

/*
 * Allocate one coherent DMA area for the jumbo receive pool and carve
 * it into RE_JBUF_COUNT fixed-size buffers on a free list.
 */
static int
re_jpool_alloc(struct re_softc *sc)
{
	struct re_list_data *ldata = &sc->re_ldata;
	struct re_jbuf *jbuf;
	bus_addr_t paddr;
	bus_size_t jpool_size;
	bus_dmamem_t dmem;
	caddr_t
buf; 2388 int i, error; 2389 2390 lwkt_serialize_init(&ldata->re_jbuf_serializer); 2391 2392 ldata->re_jbuf = kmalloc(sizeof(struct re_jbuf) * RE_JBUF_COUNT(sc), 2393 M_DEVBUF, M_WAITOK | M_ZERO); 2394 2395 jpool_size = RE_JBUF_COUNT(sc) * RE_JBUF_SIZE; 2396 2397 error = bus_dmamem_coherent(sc->re_parent_tag, 2398 RE_RXBUF_ALIGN, 0, 2399 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2400 jpool_size, BUS_DMA_WAITOK, &dmem); 2401 if (error) { 2402 device_printf(sc->dev, "could not allocate jumbo memory\n"); 2403 return error; 2404 } 2405 ldata->re_jpool_tag = dmem.dmem_tag; 2406 ldata->re_jpool_map = dmem.dmem_map; 2407 ldata->re_jpool = dmem.dmem_addr; 2408 paddr = dmem.dmem_busaddr; 2409 2410 /* ..and split it into 9KB chunks */ 2411 SLIST_INIT(&ldata->re_jbuf_free); 2412 2413 buf = ldata->re_jpool; 2414 for (i = 0; i < RE_JBUF_COUNT(sc); i++) { 2415 jbuf = &ldata->re_jbuf[i]; 2416 2417 jbuf->re_sc = sc; 2418 jbuf->re_inuse = 0; 2419 jbuf->re_slot = i; 2420 jbuf->re_buf = buf; 2421 jbuf->re_paddr = paddr; 2422 2423 SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link); 2424 2425 buf += RE_JBUF_SIZE; 2426 paddr += RE_JBUF_SIZE; 2427 } 2428 return 0; 2429 } 2430 2431 static void 2432 re_jpool_free(struct re_softc *sc) 2433 { 2434 struct re_list_data *ldata = &sc->re_ldata; 2435 2436 if (ldata->re_jpool_tag != NULL) { 2437 bus_dmamap_unload(ldata->re_jpool_tag, ldata->re_jpool_map); 2438 bus_dmamem_free(ldata->re_jpool_tag, ldata->re_jpool, 2439 ldata->re_jpool_map); 2440 bus_dma_tag_destroy(ldata->re_jpool_tag); 2441 ldata->re_jpool_tag = NULL; 2442 } 2443 2444 if (ldata->re_jbuf != NULL) { 2445 kfree(ldata->re_jbuf, M_DEVBUF); 2446 ldata->re_jbuf = NULL; 2447 } 2448 } 2449 2450 #ifdef RE_JUMBO 2451 static struct re_jbuf * 2452 re_jbuf_alloc(struct re_softc *sc) 2453 { 2454 struct re_list_data *ldata = &sc->re_ldata; 2455 struct re_jbuf *jbuf; 2456 2457 lwkt_serialize_enter(&ldata->re_jbuf_serializer); 2458 2459 jbuf = SLIST_FIRST(&ldata->re_jbuf_free); 2460 if (jbuf != 
NULL) { 2461 SLIST_REMOVE_HEAD(&ldata->re_jbuf_free, re_link); 2462 jbuf->re_inuse = 1; 2463 } 2464 2465 lwkt_serialize_exit(&ldata->re_jbuf_serializer); 2466 2467 return jbuf; 2468 } 2469 2470 static void 2471 re_jbuf_free(void *arg) 2472 { 2473 struct re_jbuf *jbuf = arg; 2474 struct re_softc *sc = jbuf->re_sc; 2475 struct re_list_data *ldata = &sc->re_ldata; 2476 2477 if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) { 2478 panic("%s: free wrong jumbo buffer", 2479 sc->arpcom.ac_if.if_xname); 2480 } else if (jbuf->re_inuse == 0) { 2481 panic("%s: jumbo buffer already freed", 2482 sc->arpcom.ac_if.if_xname); 2483 } 2484 2485 lwkt_serialize_enter(&ldata->re_jbuf_serializer); 2486 atomic_subtract_int(&jbuf->re_inuse, 1); 2487 if (jbuf->re_inuse == 0) 2488 SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link); 2489 lwkt_serialize_exit(&ldata->re_jbuf_serializer); 2490 } 2491 2492 static void 2493 re_jbuf_ref(void *arg) 2494 { 2495 struct re_jbuf *jbuf = arg; 2496 struct re_softc *sc = jbuf->re_sc; 2497 struct re_list_data *ldata = &sc->re_ldata; 2498 2499 if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) { 2500 panic("%s: ref wrong jumbo buffer", 2501 sc->arpcom.ac_if.if_xname); 2502 } else if (jbuf->re_inuse == 0) { 2503 panic("%s: jumbo buffer already freed", 2504 sc->arpcom.ac_if.if_xname); 2505 } 2506 atomic_add_int(&jbuf->re_inuse, 1); 2507 } 2508 #endif /* RE_JUMBO */ 2509 2510 static void 2511 re_disable_aspm(device_t dev) 2512 { 2513 uint16_t link_cap, link_ctrl; 2514 uint8_t pcie_ptr, reg; 2515 2516 pcie_ptr = pci_get_pciecap_ptr(dev); 2517 if (pcie_ptr == 0) 2518 return; 2519 2520 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2); 2521 if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0) 2522 return; 2523 2524 if (bootverbose) 2525 device_printf(dev, "disable ASPM\n"); 2526 2527 reg = pcie_ptr + PCIER_LINKCTRL; 2528 link_ctrl = pci_read_config(dev, reg, 2); 2529 link_ctrl &= ~(PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1); 2530 pci_write_config(dev, reg, 
link_ctrl, 2); 2531 } 2532 2533 static void 2534 re_start_xmit(struct re_softc *sc) 2535 { 2536 CSR_WRITE_1(sc, RE_TPPOLL, RE_NPQ); 2537 } 2538 2539 static void 2540 re_write_imr(struct re_softc *sc, uint32_t val) 2541 { 2542 CSR_WRITE_2(sc, RE_IMR, val); 2543 } 2544 2545 static void 2546 re_write_isr(struct re_softc *sc, uint32_t val) 2547 { 2548 CSR_WRITE_2(sc, RE_ISR, val); 2549 } 2550 2551 static uint32_t 2552 re_read_isr(struct re_softc *sc) 2553 { 2554 return CSR_READ_2(sc, RE_ISR); 2555 } 2556 2557 static void 2558 re_start_xmit_8125(struct re_softc *sc) 2559 { 2560 CSR_WRITE_2(sc, RE_TPPOLL_8125, RE_NPQ_8125); 2561 } 2562 2563 static void 2564 re_write_imr_8125(struct re_softc *sc, uint32_t val) 2565 { 2566 CSR_WRITE_4(sc, RE_IMR0_8125, val); 2567 } 2568 2569 static void 2570 re_write_isr_8125(struct re_softc *sc, uint32_t val) 2571 { 2572 CSR_WRITE_4(sc, RE_ISR0_8125, val); 2573 } 2574 2575 static uint32_t 2576 re_read_isr_8125(struct re_softc *sc) 2577 { 2578 return CSR_READ_4(sc, RE_ISR0_8125); 2579 } 2580