Lines Matching refs:txr

306 igb_tx_intr(struct igb_tx_ring *txr, int hdr)  in igb_tx_intr()  argument
309 igb_txeof(txr, hdr); in igb_tx_intr()
310 if (!ifsq_is_empty(txr->ifsq)) in igb_tx_intr()
311 ifsq_devstart(txr->ifsq); in igb_tx_intr()
315 igb_try_txgc(struct igb_tx_ring *txr, int16_t dec) in igb_try_txgc() argument
318 if (txr->tx_running > 0) { in igb_try_txgc()
319 txr->tx_running -= dec; in igb_try_txgc()
320 if (txr->tx_running <= 0 && txr->tx_nmbuf && in igb_try_txgc()
321 txr->tx_avail < txr->num_tx_desc && in igb_try_txgc()
322 txr->tx_avail + txr->intr_nsegs > txr->num_tx_desc) in igb_try_txgc()
323 igb_txgc(txr); in igb_try_txgc()
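
The igb_try_txgc() lines above capture the timer-driven descriptor garbage-collection heuristic: tx_running is a countdown that TX activity keeps topped up, the GC timer decrements it, and only when it drains while mbufs are still attached and fewer than intr_nsegs descriptors are outstanding (too few for a completion interrupt to ever be requested) does igb_txgc() run. Below is a minimal userspace sketch of that trigger condition; struct txring_model and should_gc() are hypothetical names that only mirror the fields visible in the listing.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the fields igb_try_txgc() touches; the real
 * struct igb_tx_ring in the driver has many more members. */
struct txring_model {
	int	tx_running;	/* countdown, refreshed on TX activity */
	int	tx_nmbuf;	/* mbufs still attached to TX descriptors */
	int	tx_avail;	/* free descriptors */
	int	num_tx_desc;	/* ring size */
	int	intr_nsegs;	/* completion interrupt requested every intr_nsegs segments */
};

/* True when the timer-driven GC should reclaim descriptors: the ring has
 * been idle long enough (tx_running drained), buffers are still pending,
 * and fewer than intr_nsegs descriptors are in use, so no completion
 * interrupt will arrive to clean them. */
static bool
should_gc(struct txring_model *txr, int dec)
{
	if (txr->tx_running <= 0)
		return false;
	txr->tx_running -= dec;
	return (txr->tx_running <= 0 && txr->tx_nmbuf &&
	    txr->tx_avail < txr->num_tx_desc &&
	    txr->tx_avail + txr->intr_nsegs > txr->num_tx_desc);
}

int
main(void)
{
	struct txring_model txr = {
		.tx_running = 1, .tx_nmbuf = 2,
		.tx_avail = 1020, .num_tx_desc = 1024, .intr_nsegs = 64,
	};

	/* Only 4 descriptors in use, below intr_nsegs (64): GC fires. */
	printf("gc: %s\n", should_gc(&txr, 1) ? "yes" : "no");
	return 0;
}
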
330 struct igb_tx_ring *txr = xtxr; in igb_txgc_timer() local
331 struct ifnet *ifp = &txr->sc->arpcom.ac_if; in igb_txgc_timer()
337 if (!lwkt_serialize_try(&txr->tx_serialize)) in igb_txgc_timer()
342 lwkt_serialize_exit(&txr->tx_serialize); in igb_txgc_timer()
345 igb_try_txgc(txr, IGB_TX_RUNNING_DEC); in igb_txgc_timer()
347 lwkt_serialize_exit(&txr->tx_serialize); in igb_txgc_timer()
349 callout_reset(&txr->tx_gc_timer, 1, igb_txgc_timer, txr); in igb_txgc_timer()
353 igb_free_txbuf(struct igb_tx_ring *txr, struct igb_tx_buf *txbuf) in igb_free_txbuf() argument
357 KKASSERT(txr->tx_nmbuf > 0); in igb_free_txbuf()
358 txr->tx_nmbuf--; in igb_free_txbuf()
360 bus_dmamap_unload(txr->tx_tag, txbuf->map); in igb_free_txbuf()
1066 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_init() local
1068 ifsq_clr_oactive(txr->ifsq); in igb_init()
1069 ifsq_watchdog_start(&txr->tx_watchdog); in igb_init()
1072 callout_reset_bycpu(&txr->tx_gc_timer, 1, in igb_init()
1073 igb_txgc_timer, txr, txr->tx_intr_cpuid); in igb_init()
1440 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_stop() local
1442 ifsq_clr_oactive(txr->ifsq); in igb_stop()
1443 ifsq_watchdog_stop(&txr->tx_watchdog); in igb_stop()
1444 txr->tx_flags &= ~IGB_TXFLAG_ENABLED; in igb_stop()
1446 txr->tx_running = 0; in igb_stop()
1447 callout_stop(&txr->tx_gc_timer); in igb_stop()
1625 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_setup_ifp() local
1627 ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid); in igb_setup_ifp()
1628 ifsq_set_priv(ifsq, txr); in igb_setup_ifp()
1629 ifsq_set_hw_serialize(ifsq, &txr->tx_serialize); in igb_setup_ifp()
1630 txr->ifsq = ifsq; in igb_setup_ifp()
1632 ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog, 0); in igb_setup_ifp()
1747 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_add_sysctl() local
1752 CTLFLAG_RW, &txr->tx_packets, "TXed packets"); in igb_add_sysctl()
1756 CTLFLAG_RD, &txr->tx_nmbuf, 0, "# of pending TX mbufs"); in igb_add_sysctl()
1760 CTLFLAG_RW, &txr->tx_gc, "# of TX desc GC"); in igb_add_sysctl()
1792 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_alloc_rings() local
1795 txr->sc = sc; in igb_alloc_rings()
1796 txr->me = i; in igb_alloc_rings()
1797 txr->tx_intr_cpuid = -1; in igb_alloc_rings()
1798 lwkt_serialize_init(&txr->tx_serialize); in igb_alloc_rings()
1799 callout_init_mp(&txr->tx_gc_timer); in igb_alloc_rings()
1801 error = igb_create_tx_ring(txr); in igb_alloc_rings()
1835 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_free_rings() local
1837 igb_destroy_tx_ring(txr, txr->num_tx_desc); in igb_free_rings()
1853 igb_create_tx_ring(struct igb_tx_ring *txr) in igb_create_tx_ring() argument
1861 ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd); in igb_create_tx_ring()
1864 device_printf(txr->sc->dev, in igb_create_tx_ring()
1867 txr->num_tx_desc = IGB_DEFAULT_TXD; in igb_create_tx_ring()
1869 txr->num_tx_desc = ntxd; in igb_create_tx_ring()
1875 tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc), in igb_create_tx_ring()
1877 txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag, in igb_create_tx_ring()
1879 &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr); in igb_create_tx_ring()
1880 if (txr->txdma.dma_vaddr == NULL) { in igb_create_tx_ring()
1881 device_printf(txr->sc->dev, in igb_create_tx_ring()
1885 txr->tx_base = txr->txdma.dma_vaddr; in igb_create_tx_ring()
1886 bzero(txr->tx_base, tsize); in igb_create_tx_ring()
1889 sizeof(struct igb_tx_buf) * txr->num_tx_desc); in igb_create_tx_ring()
1890 txr->tx_buf = kmalloc(tsize, M_DEVBUF, in igb_create_tx_ring()
1896 txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag, in igb_create_tx_ring()
1898 &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr); in igb_create_tx_ring()
1899 if (txr->tx_hdr == NULL) { in igb_create_tx_ring()
1900 device_printf(txr->sc->dev, in igb_create_tx_ring()
1908 error = bus_dma_tag_create(txr->sc->parent_tag, in igb_create_tx_ring()
1917 &txr->tx_tag); in igb_create_tx_ring()
1919 device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n"); in igb_create_tx_ring()
1920 kfree(txr->tx_buf, M_DEVBUF); in igb_create_tx_ring()
1921 txr->tx_buf = NULL; in igb_create_tx_ring()
1928 for (i = 0; i < txr->num_tx_desc; ++i) { in igb_create_tx_ring()
1929 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; in igb_create_tx_ring()
1931 error = bus_dmamap_create(txr->tx_tag, in igb_create_tx_ring()
1934 device_printf(txr->sc->dev, in igb_create_tx_ring()
1936 igb_destroy_tx_ring(txr, i); in igb_create_tx_ring()
1941 if (txr->sc->hw.mac.type == e1000_82575) in igb_create_tx_ring()
1942 txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0; in igb_create_tx_ring()
1947 if (txr->sc->hw.mac.type == e1000_82575) { in igb_create_tx_ring()
1953 txr->intr_nsegs = 1; in igb_create_tx_ring()
1955 txr->intr_nsegs = txr->num_tx_desc / 16; in igb_create_tx_ring()
1957 txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS; in igb_create_tx_ring()
1963 igb_free_tx_ring(struct igb_tx_ring *txr) in igb_free_tx_ring() argument
1967 for (i = 0; i < txr->num_tx_desc; ++i) { in igb_free_tx_ring()
1968 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; in igb_free_tx_ring()
1971 igb_free_txbuf(txr, txbuf); in igb_free_tx_ring()
1976 igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc) in igb_destroy_tx_ring() argument
1980 if (txr->txdma.dma_vaddr != NULL) { in igb_destroy_tx_ring()
1981 bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map); in igb_destroy_tx_ring()
1982 bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr, in igb_destroy_tx_ring()
1983 txr->txdma.dma_map); in igb_destroy_tx_ring()
1984 bus_dma_tag_destroy(txr->txdma.dma_tag); in igb_destroy_tx_ring()
1985 txr->txdma.dma_vaddr = NULL; in igb_destroy_tx_ring()
1988 if (txr->tx_hdr != NULL) { in igb_destroy_tx_ring()
1989 bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap); in igb_destroy_tx_ring()
1990 bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr, in igb_destroy_tx_ring()
1991 txr->tx_hdr_dmap); in igb_destroy_tx_ring()
1992 bus_dma_tag_destroy(txr->tx_hdr_dtag); in igb_destroy_tx_ring()
1993 txr->tx_hdr = NULL; in igb_destroy_tx_ring()
1996 if (txr->tx_buf == NULL) in igb_destroy_tx_ring()
2000 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; in igb_destroy_tx_ring()
2003 bus_dmamap_destroy(txr->tx_tag, txbuf->map); in igb_destroy_tx_ring()
2005 bus_dma_tag_destroy(txr->tx_tag); in igb_destroy_tx_ring()
2007 kfree(txr->tx_buf, M_DEVBUF); in igb_destroy_tx_ring()
2008 txr->tx_buf = NULL; in igb_destroy_tx_ring()
2012 igb_init_tx_ring(struct igb_tx_ring *txr) in igb_init_tx_ring() argument
2015 bzero(txr->tx_base, in igb_init_tx_ring()
2016 sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc); in igb_init_tx_ring()
2019 *(txr->tx_hdr) = 0; in igb_init_tx_ring()
2022 txr->next_avail_desc = 0; in igb_init_tx_ring()
2023 txr->next_to_clean = 0; in igb_init_tx_ring()
2024 txr->tx_nsegs = 0; in igb_init_tx_ring()
2025 txr->tx_running = 0; in igb_init_tx_ring()
2026 txr->tx_nmbuf = 0; in igb_init_tx_ring()
2029 txr->tx_avail = txr->num_tx_desc; in igb_init_tx_ring()
2032 txr->tx_flags |= IGB_TXFLAG_ENABLED; in igb_init_tx_ring()
2044 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_init_tx_unit() local
2045 uint64_t bus_addr = txr->txdma.dma_paddr; in igb_init_tx_unit()
2046 uint64_t hdr_paddr = txr->tx_hdr_paddr; in igb_init_tx_unit()
2051 txr->num_tx_desc * sizeof(struct e1000_tx_desc)); in igb_init_tx_unit()
2104 igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp) in igb_txcsum_ctx() argument
2116 ctxd = txr->next_avail_desc; in igb_txcsum_ctx()
2117 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; in igb_txcsum_ctx()
2155 if (txr->sc->hw.mac.type == e1000_82575) in igb_txcsum_ctx()
2156 mss_l4len_idx = txr->me << 4; in igb_txcsum_ctx()
2165 if (++ctxd == txr->num_tx_desc) in igb_txcsum_ctx()
2167 txr->next_avail_desc = ctxd; in igb_txcsum_ctx()
2168 --txr->tx_avail; in igb_txcsum_ctx()
2174 igb_txeof(struct igb_tx_ring *txr, int hdr) in igb_txeof() argument
2178 if (txr->tx_avail == txr->num_tx_desc) in igb_txeof()
2181 first = txr->next_to_clean; in igb_txeof()
2185 avail = txr->tx_avail; in igb_txeof()
2187 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; in igb_txeof()
2189 KKASSERT(avail < txr->num_tx_desc); in igb_txeof()
2193 igb_free_txbuf(txr, txbuf); in igb_txeof()
2195 if (++first == txr->num_tx_desc) in igb_txeof()
2198 txr->next_to_clean = first; in igb_txeof()
2199 txr->tx_avail = avail; in igb_txeof()
2205 if (txr->tx_avail > IGB_MAX_SCATTER + IGB_TX_RESERVED) { in igb_txeof()
2206 ifsq_clr_oactive(txr->ifsq); in igb_txeof()
2214 ifsq_watchdog_set_count(&txr->tx_watchdog, 0); in igb_txeof()
2216 txr->tx_running = IGB_TX_RUNNING; in igb_txeof()
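
The igb_txeof() lines show the completion path: starting at next_to_clean the driver walks toward the head index the hardware reports ("hdr"), frees each traversed buffer through igb_free_txbuf(), then publishes the new next_to_clean and tx_avail before clearing the output-active flag and rearming tx_running. A simplified userspace model of that walk follows; txbuf_model, txring_model and txeof_model() are hypothetical stand-ins, and free() replaces the real mbuf/busdma teardown.

#include <stdlib.h>

struct txbuf_model { void *m_head; };

struct txring_model {
	struct txbuf_model *tx_buf;
	int	num_tx_desc;
	int	next_to_clean;
	int	tx_avail;
};

/* Walk the ring from next_to_clean up to (but not including) the reported
 * head, returning every traversed descriptor to the free pool and releasing
 * any buffer still attached to it. */
static void
txeof_model(struct txring_model *txr, int hdr)
{
	int first, avail;

	if (txr->tx_avail == txr->num_tx_desc)
		return;				/* nothing outstanding */

	first = txr->next_to_clean;
	avail = txr->tx_avail;
	while (first != hdr) {
		struct txbuf_model *txbuf = &txr->tx_buf[first];

		++avail;
		if (txbuf->m_head != NULL) {
			free(txbuf->m_head);	/* models igb_free_txbuf() */
			txbuf->m_head = NULL;
		}
		if (++first == txr->num_tx_desc)
			first = 0;		/* wrap around the ring */
	}
	txr->next_to_clean = first;
	txr->tx_avail = avail;
}

int
main(void)
{
	struct txbuf_model bufs[8] = { { NULL } };
	struct txring_model txr = {
		.tx_buf = bufs, .num_tx_desc = 8,
		.next_to_clean = 6, .tx_avail = 5,
	};

	bufs[6].m_head = malloc(1);	/* pretend descriptor 6 carried a packet */
	txeof_model(&txr, 1);		/* hardware head wrapped to index 1 */
	return (txr.next_to_clean == 1 && txr.tx_avail == 8) ? 0 : 1;
}
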
2220 igb_txgc(struct igb_tx_ring *txr) in igb_txgc() argument
2227 if (txr->tx_avail == txr->num_tx_desc) in igb_txgc()
2230 hdr = E1000_READ_REG(&txr->sc->hw, E1000_TDH(txr->me)), in igb_txgc()
2231 first = txr->next_to_clean; in igb_txgc()
2234 txr->tx_gc++; in igb_txgc()
2237 avail = txr->tx_avail; in igb_txgc()
2240 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; in igb_txgc()
2243 KKASSERT(avail < txr->num_tx_desc); in igb_txgc()
2247 igb_free_txbuf(txr, txbuf); in igb_txgc()
2249 if (++first == txr->num_tx_desc) in igb_txgc()
2253 if (txr->tx_nmbuf) in igb_txgc()
2254 txr->tx_running = IGB_TX_RUNNING; in igb_txgc()
3358 struct igb_tx_ring *txr = arg; in igb_npoll_tx() local
3360 ASSERT_SERIALIZED(&txr->tx_serialize); in igb_npoll_tx()
3361 igb_tx_intr(txr, *(txr->tx_hdr)); in igb_npoll_tx()
3362 igb_try_txgc(txr, 1); in igb_npoll_tx()
3391 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_npoll() local
3396 info->ifpi_tx[cpu].arg = txr; in igb_npoll()
3397 info->ifpi_tx[cpu].serializer = &txr->tx_serialize; in igb_npoll()
3398 ifsq_set_cpuid(txr->ifsq, cpu); in igb_npoll()
3413 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_npoll() local
3415 ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid); in igb_npoll()
3439 struct igb_tx_ring *txr = &sc->tx_rings[0]; in igb_intr() local
3452 if (eicr & txr->tx_intr_mask) { in igb_intr()
3453 lwkt_serialize_enter(&txr->tx_serialize); in igb_intr()
3454 igb_tx_intr(txr, *(txr->tx_hdr)); in igb_intr()
3455 lwkt_serialize_exit(&txr->tx_serialize); in igb_intr()
3513 struct igb_tx_ring *txr = &sc->tx_rings[0]; in igb_intr_shared() local
3515 lwkt_serialize_enter(&txr->tx_serialize); in igb_intr_shared()
3516 igb_tx_intr(txr, *(txr->tx_hdr)); in igb_intr_shared()
3517 lwkt_serialize_exit(&txr->tx_serialize); in igb_intr_shared()
3532 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp, in igb_encap() argument
3545 error = igb_tso_pullup(txr, m_headp); in igb_encap()
3560 tx_buf = &txr->tx_buf[txr->next_avail_desc]; in igb_encap()
3564 maxsegs = txr->tx_avail - IGB_TX_RESERVED; in igb_encap()
3568 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp, in igb_encap()
3572 txr->sc->mbuf_defrag_failed++; in igb_encap()
3574 txr->sc->no_tx_dma_setup++; in igb_encap()
3580 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE); in igb_encap()
3593 igb_tso_ctx(txr, m_head, &hdrlen); in igb_encap()
3597 txr->tx_nsegs++; in igb_encap()
3599 } else if (igb_txcsum_ctx(txr, m_head)) { in igb_encap()
3604 txr->tx_nsegs++; in igb_encap()
3609 txr->tx_nsegs += nsegs; in igb_encap()
3610 if (txr->tx_nsegs >= txr->intr_nsegs) { in igb_encap()
3615 txr->tx_nsegs = 0; in igb_encap()
3627 if (txr->sc->hw.mac.type == e1000_82575) in igb_encap()
3628 olinfo_status |= txr->me << 4; in igb_encap()
3631 i = txr->next_avail_desc; in igb_encap()
3636 tx_buf = &txr->tx_buf[i]; in igb_encap()
3637 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; in igb_encap()
3644 if (++i == txr->num_tx_desc) in igb_encap()
3649 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n")); in igb_encap()
3650 txr->next_avail_desc = i; in igb_encap()
3651 txr->tx_avail -= nsegs; in igb_encap()
3652 txr->tx_nmbuf++; in igb_encap()
3668 ++txr->tx_packets; in igb_encap()
3678 struct igb_tx_ring *txr = ifsq_get_priv(ifsq); in igb_start() local
3682 KKASSERT(txr->ifsq == ifsq); in igb_start()
3683 ASSERT_SERIALIZED(&txr->tx_serialize); in igb_start()
3688 if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) { in igb_start()
3694 if (txr->tx_avail <= IGB_MAX_SCATTER + IGB_TX_RESERVED) { in igb_start()
3697 ifsq_watchdog_set_count(&txr->tx_watchdog, 5); in igb_start()
3705 if (igb_encap(txr, &m_head, &nsegs, &idx)) { in igb_start()
3718 if (nsegs >= txr->wreg_nsegs) { in igb_start()
3719 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); in igb_start()
3728 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); in igb_start()
3729 txr->tx_running = IGB_TX_RUNNING; in igb_start()
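
The igb_start() lines show the batched tail-register update: packets are encapsulated in a loop, but the E1000_TDT doorbell is written only once the accumulated segment count reaches wreg_nsegs, plus one final write after the loop so the NIC always learns about the last packets. A userspace sketch of that batching; write_tdt(), the fixed per-packet segment count and the batch size of 8 are purely illustrative (the driver takes its default from IGB_DEF_TXWREG_NSEGS).

#include <stdio.h>

static int tdt_writes;			/* counts modelled doorbell writes */

static void
write_tdt(int idx)
{
	tdt_writes++;			/* models E1000_WRITE_REG(hw, E1000_TDT(me), idx) */
}

int
main(void)
{
	const int wreg_nsegs = 8;	/* illustrative batch size */
	const int num_tx_desc = 1024;
	int nsegs = 0, idx = 0, pkt;

	for (pkt = 0; pkt < 20; pkt++) {
		int pkt_segs = 3;	/* pretend each packet consumed 3 descriptors */

		idx = (idx + pkt_segs) % num_tx_desc;
		nsegs += pkt_segs;
		if (nsegs >= wreg_nsegs) {
			write_tdt(idx);	/* flush the batch */
			nsegs = 0;
		}
	}
	if (nsegs > 0)
		write_tdt(idx);		/* final write after the send loop */

	printf("%d doorbell writes for 20 packets\n", tdt_writes);
	return 0;
}
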
3735 struct igb_tx_ring *txr = ifsq_get_priv(ifsq); in igb_watchdog() local
3740 KKASSERT(txr->ifsq == ifsq); in igb_watchdog()
3749 ifsq_watchdog_set_count(&txr->tx_watchdog, 5); in igb_watchdog()
3754 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, in igb_watchdog()
3755 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)), in igb_watchdog()
3756 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me))); in igb_watchdog()
3759 txr->me, txr->tx_avail, txr->next_to_clean); in igb_watchdog()
3863 struct igb_tx_ring *txr = &sc->tx_rings[0]; in igb_sysctl_tx_intr_nsegs() local
3866 nsegs = txr->intr_nsegs; in igb_sysctl_tx_intr_nsegs()
3875 if (nsegs >= txr->num_tx_desc - IGB_MAX_SCATTER - IGB_TX_RESERVED) { in igb_sysctl_tx_intr_nsegs()
3949 const struct igb_tx_ring *txr; in igb_init_unshared_intr() local
4048 txr = &sc->tx_rings[i]; in igb_init_unshared_intr()
4056 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; in igb_init_unshared_intr()
4060 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; in igb_init_unshared_intr()
4091 txr = &sc->tx_rings[i]; in igb_init_unshared_intr()
4099 (txr->tx_intr_vec | E1000_IVAR_VALID) << 8; in igb_init_unshared_intr()
4103 (txr->tx_intr_vec | E1000_IVAR_VALID) << 24; in igb_init_unshared_intr()
4155 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_vec0, int intr_vecmax) in igb_set_txintr_mask() argument
4157 if (txr->sc->hw.mac.type == e1000_82575) { in igb_set_txintr_mask()
4158 txr->tx_intr_vec = 0; /* unused */ in igb_set_txintr_mask()
4159 switch (txr->me) { in igb_set_txintr_mask()
4161 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0; in igb_set_txintr_mask()
4164 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1; in igb_set_txintr_mask()
4167 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2; in igb_set_txintr_mask()
4170 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3; in igb_set_txintr_mask()
4173 panic("unsupported # of TX ring, %d\n", txr->me); in igb_set_txintr_mask()
4178 txr->tx_intr_vec = intr_vec % intr_vecmax; in igb_set_txintr_mask()
4179 txr->tx_intr_mask = 1 << txr->tx_intr_vec; in igb_set_txintr_mask()
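
igb_set_txintr_mask() picks which EICR bit a TX ring's interrupt arrives on: the 82575 has a fixed per-queue TX bit selected by the ring index (E1000_EICR_TX_QUEUE0..3, with no programmable vector), while later MACs derive the mask from the MSI-X vector assigned to the ring. A compact model of that selection; tx_intr_mask_model() is an illustrative name and the EICR_TX_QUEUE* values mirror the usual e1000 register definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Per-queue TX bits in EICR used by the 82575. */
#define EICR_TX_QUEUE0	0x00000100u
#define EICR_TX_QUEUE1	0x00000200u
#define EICR_TX_QUEUE2	0x00000400u
#define EICR_TX_QUEUE3	0x00000800u

/* "me" is the ring index, "intr_vec" the MSI-X vector assigned to the ring,
 * "intr_vecmax" the number of usable vectors. */
static uint32_t
tx_intr_mask_model(int is_82575, int me, int intr_vec, int intr_vecmax)
{
	if (is_82575) {
		switch (me) {		/* fixed queue-to-bit mapping */
		case 0: return EICR_TX_QUEUE0;
		case 1: return EICR_TX_QUEUE1;
		case 2: return EICR_TX_QUEUE2;
		case 3: return EICR_TX_QUEUE3;
		default:
			fprintf(stderr, "unsupported # of TX ring, %d\n", me);
			exit(1);
		}
	}
	/* Newer MACs: the mask is simply the bit of the assigned vector. */
	return 1u << (intr_vec % intr_vecmax);
}

int
main(void)
{
	printf("82575 ring 1: 0x%08x\n", tx_intr_mask_model(1, 1, 0, 1));
	printf("newer MAC, vector 5 of 8: 0x%08x\n",
	    tx_intr_mask_model(0, 0, 5, 8));
	return 0;
}
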
4274 struct igb_tx_ring *txr; in igb_alloc_intr() local
4301 txr = &sc->tx_rings[i]; in igb_alloc_intr()
4302 txr->tx_intr_vec = 0; in igb_alloc_intr()
4303 txr->tx_intr_mask = 0; in igb_alloc_intr()
4304 txr->tx_intr_cpuid = -1; in igb_alloc_intr()
4415 txr = &sc->tx_rings[i]; in igb_alloc_intr()
4416 if (txr->tx_intr_cpuid < 0) in igb_alloc_intr()
4417 txr->tx_intr_cpuid = 0; in igb_alloc_intr()
4562 struct igb_tx_ring *txr = NULL; in igb_alloc_msix() local
4577 txr = &sc->tx_rings[j]; in igb_alloc_msix()
4578 KKASSERT(txr->tx_intr_cpuid < 0); in igb_alloc_msix()
4582 rxr->rx_txr = txr; in igb_alloc_msix()
4589 if (txr != NULL) { in igb_alloc_msix()
4594 i, txr->me); in igb_alloc_msix()
4596 txr->tx_intr_vec = rxr->rx_intr_vec; in igb_alloc_msix()
4597 txr->tx_intr_mask = rxr->rx_intr_mask; in igb_alloc_msix()
4598 txr->tx_intr_cpuid = intr->intr_cpuid; in igb_alloc_msix()
4611 struct igb_tx_ring *txr = &sc->tx_rings[i]; in igb_alloc_msix() local
4613 if (txr->tx_intr_cpuid >= 0) { in igb_alloc_msix()
4619 txr->tx_intr_vec = x; in igb_alloc_msix()
4620 txr->tx_intr_mask = 1 << txr->tx_intr_vec; in igb_alloc_msix()
4623 intr->intr_serialize = &txr->tx_serialize; in igb_alloc_msix()
4625 intr->intr_funcarg = txr; in igb_alloc_msix()
4631 txr->tx_intr_cpuid = intr->intr_cpuid; in igb_alloc_msix()
4733 struct igb_tx_ring *txr = arg; in igb_msix_tx() local
4735 ASSERT_SERIALIZED(&txr->tx_serialize); in igb_msix_tx()
4737 igb_tx_intr(txr, *(txr->tx_hdr)); in igb_msix_tx()
4738 E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask); in igb_msix_tx()
4799 igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp) in igb_tso_pullup() argument
4823 if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) { in igb_tso_pullup()
4834 igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen) in igb_tso_ctx() argument
4846 ctxd = txr->next_avail_desc; in igb_tso_ctx()
4847 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; in igb_tso_ctx()
4870 if (txr->sc->hw.mac.type == e1000_82575) in igb_tso_ctx()
4871 mss_l4len_idx |= txr->me << 4; in igb_tso_ctx()
4879 if (++ctxd == txr->num_tx_desc) in igb_tso_ctx()
4881 txr->next_avail_desc = ctxd; in igb_tso_ctx()
4882 --txr->tx_avail; in igb_tso_ctx()
4924 struct igb_tx_ring *txr; in igb_msix_rxtx() local
4937 txr = rxr->rx_txr; in igb_msix_rxtx()
4938 hdr = *(txr->tx_hdr); in igb_msix_rxtx()
4939 if (hdr != txr->next_to_clean) { in igb_msix_rxtx()
4940 lwkt_serialize_enter(&txr->tx_serialize); in igb_msix_rxtx()
4941 igb_tx_intr(txr, hdr); in igb_msix_rxtx()
4942 lwkt_serialize_exit(&txr->tx_serialize); in igb_msix_rxtx()
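
The igb_msix_rxtx() lines show how a vector shared between an RX and a TX ring avoids needless TX work: the NIC writes the consumed-descriptor head back into host memory (txr->tx_hdr), so the handler compares that value with next_to_clean and only takes the TX serializer and calls igb_tx_intr() when something actually completed. A short sketch of the check; the types, msix_rxtx_model() and the stub tx_intr() are hypothetical, and the stub omits the real locking and cleanup.

#include <stdint.h>
#include <stdio.h>

/* tx_hdr points at the DMA area the NIC writes the consumed-descriptor head
 * into; next_to_clean is the driver's clean index. */
struct txring_model {
	volatile uint32_t *tx_hdr;
	uint32_t next_to_clean;
};

static void
tx_intr(struct txring_model *txr, uint32_t hdr)
{
	/* In the driver this runs under txr->tx_serialize and does the
	 * igb_txeof() cleanup followed by a queue restart. */
	printf("clean up to head %u\n", hdr);
	txr->next_to_clean = hdr;
}

/* Shared RX/TX handler path: read the written-back head once and skip the
 * TX side entirely when nothing new completed. */
static void
msix_rxtx_model(struct txring_model *txr)
{
	uint32_t hdr = *txr->tx_hdr;

	if (hdr != txr->next_to_clean)
		tx_intr(txr, hdr);
}

int
main(void)
{
	uint32_t hdr_mem = 0;
	struct txring_model txr = { .tx_hdr = &hdr_mem, .next_to_clean = 0 };

	msix_rxtx_model(&txr);	/* head unchanged: TX path skipped */
	hdr_mem = 5;		/* NIC reports 5 more descriptors consumed */
	msix_rxtx_model(&txr);	/* now the TX cleanup runs */
	return 0;
}
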