/*-
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

/* Theory of operation:
 *
 * Tx queues allocation and mapping
 *
 * One Tx queue with enabled checksum offload is allocated per Rx channel
 * (event queue). Also 2 Tx queues (one without checksum offload and one
 * with IP checksum offload only) are allocated and bound to event queue 0.
 * sfxge_txq_type is used as the Tx queue label.
 *
 * So, the event queue plus label to Tx queue index mapping is:
 * if event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c
 */
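/*
 * Illustrative sketch only (compiled out): the mapping above, mirroring
 * what sfxge_get_txq_by_label() in sfxge_ev.c computes. The function
 * name is hypothetical and not part of the driver.
 */
#if 0
static unsigned int
sfxge_example_txq_index(unsigned int evq_index, unsigned int txq_label)
{
        /* Event queue 0 hosts one Tx queue per sfxge_txq_type label. */
        if (evq_index == 0)
                return (txq_label);
        /* Every other event queue hosts a single offload-capable queue. */
        return (SFXGE_TXQ_NTYPES + evq_index - 1);
}
#endif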
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/limits.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#ifdef RSS
#include <net/rss_config.h>
#endif

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"


#define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum number of packets (of any kind) in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \
	SFXGE_PARAM(tx_dpl_get_non_tcp_max)
static int sfxge_tx_dpl_get_non_tcp_max =
	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_non_tcp_max, 0,
	   "Maximum number of non-TCP packets in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum number of packets (of any kind) in deferred packet put-list");
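/*
 * These are boot-time tunables (CTLFLAG_RDTUN). Assuming the usual
 * SFXGE_PARAM() expansion into the hw.sfxge sysctl tree, they can be
 * set from /boot/loader.conf, for example:
 *
 *	hw.sfxge.tx_dpl_get_max="4096"
 *	hw.sfxge.tx_dpl_put_max="2048"
 *
 * The values shown are illustrative, not recommendations.
 */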
#define	SFXGE_PARAM_TSO_FW_ASSISTED	SFXGE_PARAM(tso_fw_assisted)
static int sfxge_tso_fw_assisted = (SFXGE_FATSOV1 | SFXGE_FATSOV2);
TUNABLE_INT(SFXGE_PARAM_TSO_FW_ASSISTED, &sfxge_tso_fw_assisted);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tso_fw_assisted, CTLFLAG_RDTUN,
	   &sfxge_tso_fw_assisted, 0,
	   "Bitmask of FW-assisted TSO variants allowed to be used "
	   "if supported by NIC firmware");


static const struct {
	const char *name;
	size_t offset;
} sfxge_tx_stats[] = {
#define	SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
	SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_get_overflow, get_overflow),
	SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow),
	SFXGE_TX_STAT(tx_put_overflow, put_overflow),
	SFXGE_TX_STAT(tx_netdown_drops, netdown_drops),
};
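/*
 * A minimal sketch (compiled out) of how a name/offset table like
 * sfxge_tx_stats[] is typically consumed: the offset indexes into each
 * per-queue softc so one counter can be summed across queues. The
 * function name is hypothetical; the driver's real sysctl handler for
 * these statistics lives elsewhere.
 */
#if 0
static unsigned long
sfxge_example_tx_stat_sum(struct sfxge_softc *sc, unsigned int id)
{
        unsigned long sum = 0;
        unsigned int i;

        for (i = 0; i < sc->txq_count; i++)
                sum += *(unsigned long *)((caddr_t)sc->txq[i] +
                                          sfxge_tx_stats[id].offset);
        return (sum);
}
#endif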
/* Forward declarations. */
static void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg,
			      int vlan_tagged);

static int
sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ?
			     mbuf->m_pkthdr.ether_vtag :
			     0);

	if (this_tag == txq->hw_vlan_tci)
		return (0);

	efx_tx_qdesc_vlantci_create(txq->common,
				    bswap16(this_tag),
				    &txq->pend_desc[0]);
	txq->n_pend_desc = 1;
	txq->hw_vlan_tci = this_tag;
	return (1);
}

static inline void
sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp)
{
	KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0"));
	if (__predict_false(*pstmp ==
			    &txq->stmp[txq->ptr_mask]))
		*pstmp = &txq->stmp[0];
	else
		(*pstmp)++;
}


void
sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
{
	unsigned int completed;

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;
		unsigned int id;

		id = completed++ & txq->ptr_mask;

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				do
					m = m_free(m);
				while (m != NULL);
			} else {
				free(stmp->u.heap_buf, M_SFXGE);
			}
			stmp->flags = 0;
		}
	}
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	mb();
	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			sfxge_tx_qunblock(txq);
	}
}
static unsigned int
sfxge_is_mbuf_non_tcp(struct mbuf *mbuf)
{
	/* The absence of TCP checksum offload flags does not prove that
	 * the packet is non-TCP, but the two coincide whenever the sender
	 * wants to achieve high throughput.
	 */
	return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)));
}

/*
 * Reorder the put list and append it to the get list.
 */
static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	uintptr_t put;
	unsigned int count;
	unsigned int non_tcp_count;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	stdp = &txq->dpl;

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);
	mbuf = (void *)put;

	if (mbuf == NULL)
		return;

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
	get_next = NULL;

	count = 0;
	non_tcp_count = 0;
	do {
		struct mbuf *put_next;

		non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf);
		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;

		count++;
	} while (mbuf != NULL);

	if (count > stdp->std_put_hiwat)
		stdp->std_put_hiwat = count;

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;
	stdp->std_get_non_tcp_count += non_tcp_count;
}
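/*
 * Why the reversal above works: sfxge_tx_qdpl_put_unlocked() pushes
 * packets onto std_put LIFO-fashion with an atomic compare-and-swap,
 * so a single atomic_readandclear_ptr() detaches the whole chain at
 * once, and reversing it restores the original submission order before
 * it is appended to the get-list.
 */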
static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	txq->reaped = txq->completed;
}

static void
sfxge_tx_qlist_post(struct sfxge_txq *txq)
{
	unsigned int old_added;
	unsigned int block_level;
	unsigned int level;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= txq->max_pkt_desc,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc,
			       txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qdesc_post() failed"));

	/* If efx_tx_qdesc_post() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qdesc_post() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/*
	 * Set the block level to ensure there is space to generate a
	 * large number of descriptors for TSO.
	 */
	block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc;

	/* Have we reached the block level? */
	if (level < block_level)
		return;

	/* Reap, and check again */
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < block_level)
		return;

	txq->blocked = 1;

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	mb();
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < block_level) {
		mb();
		txq->blocked = 0;
	}
}
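/*
 * Worked example of the block level above (values illustrative): with
 * txq->entries == 1024 and, say, max_pkt_desc == 64, the queue is
 * marked blocked as soon as fewer than 64 descriptors of the usable
 * ring capacity (EFX_TXQ_LIMIT(1024)) remain, guaranteeing that the
 * next packet, even a worst-case TSO burst, can still be posted in
 * full.
 */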
static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	bus_dmamap_t *used_map;
	bus_dmamap_t map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	unsigned int id;
	struct sfxge_tx_mapping *stmp;
	efx_desc_t *desc;
	int n_dma_seg;
	int rc;
	int i;
	int eop;
	int vlan_tagged;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) {
		rc = EINTR;
		goto reject;
	}

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
	if (rc == EFBIG) {
		/* Try again. */
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
			goto reject;
		++txq->collapses;
		mbuf = new_mbuf;
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     stmp->map, mbuf,
					     dma_seg, &n_dma_seg, 0);
	}
	if (rc != 0)
		goto reject;

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	vlan_tagged = sfxge_tx_maybe_insert_tag(txq, mbuf);
	if (vlan_tagged) {
		sfxge_next_stmp(txq, &stmp);
	}
	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg, vlan_tagged);
		if (rc < 0)
			goto reject_mapped;
		stmp = &txq->stmp[(rc - 1) & txq->ptr_mask];
	} else {
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */

		i = 0;
		for (;;) {
			desc = &txq->pend_desc[i + vlan_tagged];
			eop = (i == n_dma_seg - 1);
			efx_tx_qdesc_dma_create(txq->common,
						dma_seg[i].ds_addr,
						dma_seg[i].ds_len,
						eop,
						desc);
			if (eop)
				break;
			i++;
			sfxge_next_stmp(txq, &stmp);
		}
		txq->n_pend_desc = n_dma_seg + vlan_tagged;
	}

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		map = stmp->map;
		stmp->map = *used_map;
		*used_map = map;
	}

	stmp->u.mbuf = mbuf;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	return (0);

reject_mapped:
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
	/* Drop the packet on the floor. */
	m_freem(mbuf);
	++txq->drops;

	return (rc);
}
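/*
 * Note on the EFBIG path in sfxge_tx_queue_mbuf() above: if the DMA
 * load needs more than SFXGE_TX_MAPPING_MAX_SEG segments, the chain is
 * collapsed once and the load retried; a second failure drops the
 * packet, and the tx_collapses/tx_drops counters track both outcomes.
 */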
/*
 * Drain the deferred packet list into the transmit queue.
 */
static void
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int count;
	unsigned int non_tcp_count;
	unsigned int pushed;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	sc = txq->sc;
	stdp = &txq->dpl;
	pushed = txq->added;

	if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) {
		prefetch_read_many(sc->enp);
		prefetch_read_many(txq->common);
	}

	mbuf = stdp->std_get;
	count = stdp->std_get_count;
	non_tcp_count = stdp->std_get_non_tcp_count;

	if (count > stdp->std_get_hiwat)
		stdp->std_get_hiwat = count;

	while (count != 0) {
		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

		if (next != NULL)
			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);
		--count;
		non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf);
		mbuf = next;
		if (rc != 0)
			continue;

		if (txq->blocked)
			break;

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added, pushed);
			pushed = txq->added;
		}
	}

	if (count == 0) {
		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		KASSERT(non_tcp_count == 0,
			("inconsistent TCP/non-TCP detection"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_get_non_tcp_count = 0;
		stdp->std_getp = &stdp->std_get;
	} else {
		stdp->std_get = mbuf;
		stdp->std_get_count = count;
		stdp->std_get_non_tcp_count = non_tcp_count;
	}

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added, pushed);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
		("queue unblocked but count is non-zero"));
}
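/*
 * Note on the batching above: the doorbell (efx_tx_qpush()) is written
 * at most once per SFXGE_TX_BATCH packets inside the loop, plus once
 * for any remainder afterwards, amortising the cost of the register
 * write across the drained packets.
 */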
#define	SFXGE_TX_QDPL_PENDING(_txq)	((_txq)->dpl.std_put != 0)

/*
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
static void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	do {
		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		if (!txq->blocked)
			sfxge_tx_qdpl_drain(txq);

		SFXGE_TXQ_UNLOCK(txq);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 SFXGE_TXQ_TRYLOCK(txq));
}
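/*
 * Note on the loop above: the put-list is re-checked after the unlock
 * because another thread may have appended packets in the meantime;
 * servicing continues only if the lock can be re-taken without
 * blocking, otherwise whichever thread now holds it picks up the work.
 */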
/*
 * Put a packet on the deferred packet get-list.
 */
static int
sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	struct sfxge_tx_dpl *stdp;

	stdp = &txq->dpl;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	if (stdp->std_get_count >= stdp->std_get_max) {
		txq->get_overflow++;
		return (ENOBUFS);
	}
	if (sfxge_is_mbuf_non_tcp(mbuf)) {
		if (stdp->std_get_non_tcp_count >=
		    stdp->std_get_non_tcp_max) {
			txq->get_non_tcp_overflow++;
			return (ENOBUFS);
		}
		stdp->std_get_non_tcp_count++;
	}

	*(stdp->std_getp) = mbuf;
	stdp->std_getp = &mbuf->m_nextpkt;
	stdp->std_get_count++;

	return (0);
}
/*
 * Put a packet on the deferred packet put-list.
 *
 * We overload the csum_data field in the mbuf to keep track of the
 * put-list length, because there is no cheap alternative that avoids
 * races.
 */
static int
sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	struct sfxge_tx_dpl *stdp;
	volatile uintptr_t *putp;
	uintptr_t old;
	uintptr_t new;
	unsigned old_len;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);

	stdp = &txq->dpl;
	putp = &stdp->std_put;
	new = (uintptr_t)mbuf;

	do {
		old = *putp;
		if (old != 0) {
			struct mbuf *mp = (struct mbuf *)old;
			old_len = mp->m_pkthdr.csum_data;
		} else
			old_len = 0;
		if (old_len >= stdp->std_put_max) {
			atomic_add_long(&txq->put_overflow, 1);
			return (ENOBUFS);
		}
		mbuf->m_pkthdr.csum_data = old_len + 1;
		mbuf->m_nextpkt = (void *)old;
	} while (atomic_cmpset_ptr(putp, old, new) == 0);

	return (0);
}
/*
 * Called from if_transmit - will try to grab the txq lock and, if it
 * succeeds, append the packet to the deferred packet get-list;
 * otherwise push it onto the lock-free put-list, space permitting.
 */
static int
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
{
	int rc;

	if (!SFXGE_LINK_UP(txq->sc)) {
		atomic_add_long(&txq->netdown_drops, 1);
		return (ENETDOWN);
	}

	/*
	 * Try to grab the txq lock. If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list. Otherwise, it will be pushed on the "put list".
	 */
	if (SFXGE_TXQ_TRYLOCK(txq)) {
		/* First swizzle put-list to get-list to keep order */
		sfxge_tx_qdpl_swizzle(txq);

		rc = sfxge_tx_qdpl_put_locked(txq, m);

		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
	} else {
		rc = sfxge_tx_qdpl_put_unlocked(txq, m);

		/*
		 * Try to grab the lock again.
		 *
		 * If we are able to get the lock, we need to process
		 * the deferred packet list. If we are not able to get
		 * the lock, another thread is processing the list.
		 */
		if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) {
			sfxge_tx_qdpl_service(txq);
			/* Lock has been dropped. */
		}
	}

	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);

	return (rc);
}

static void
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	SFXGE_TXQ_LOCK(txq);

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
		m_freem(mbuf);
	}
	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_get_non_tcp_count = 0;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_UNLOCK(txq);
}

void
sfxge_if_qflush(struct ifnet *ifp)
{
	struct sfxge_softc *sc;
	unsigned int i;

	sc = ifp->if_softc;

	for (i = 0; i < sc->txq_count; i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
}
#if SFXGE_TX_PARSE_EARLY

/* There is little space for user data in the mbuf pkthdr, so we use
 * the l*hlen fields, which are otherwise unused by the driver, to
 * store header offsets.
 * The fields are only 8 bits wide, but that is fine: no header may be
 * longer than 255 bytes.
 */


#define	TSO_MBUF_PROTO(_mbuf)    ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
/* We abuse l5hlen here because PH_loc can hold only 64 bits of data */
#define	TSO_MBUF_FLAGS(_mbuf)    ((_mbuf)->m_pkthdr.l5hlen)
#define	TSO_MBUF_PACKETID(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
#define	TSO_MBUF_SEQNUM(_mbuf)   ((_mbuf)->m_pkthdr.PH_loc.thirtytwo[1])

static void sfxge_parse_tx_packet(struct mbuf *mbuf)
{
	struct ether_header *eh = mtod(mbuf, struct ether_header *);
	const struct tcphdr *th;
	struct tcphdr th_copy;

	/* Find network protocol and header */
	TSO_MBUF_PROTO(mbuf) = eh->ether_type;
	if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		TSO_MBUF_PROTO(mbuf) = veh->evl_proto;
		mbuf->m_pkthdr.l2hlen = sizeof(*veh);
	} else {
		mbuf->m_pkthdr.l2hlen = sizeof(*eh);
	}

	/* Find TCP header */
	if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IP)) {
		const struct ip *iph =
			(const struct ip *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen);

		KASSERT(iph->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + 4 * iph->ip_hl;
		TSO_MBUF_PACKETID(mbuf) = iph->ip_id;
	} else {
		KASSERT(TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(((const struct ip6_hdr *)
			 mtodo(mbuf, mbuf->m_pkthdr.l2hlen))->ip6_nxt ==
			IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen +
			sizeof(struct ip6_hdr);
		TSO_MBUF_PACKETID(mbuf) = 0;
	}

	KASSERT(mbuf->m_len >= mbuf->m_pkthdr.l3hlen,
		("network header is fragmented in mbuf"));

	/* We need TCP header including flags (window is the next) */
	if (mbuf->m_len < mbuf->m_pkthdr.l3hlen +
			  offsetof(struct tcphdr, th_win)) {
		m_copydata(mbuf, mbuf->m_pkthdr.l3hlen, sizeof(th_copy),
			   (caddr_t)&th_copy);
		th = &th_copy;
	} else {
		th = (const struct tcphdr *)mtodo(mbuf, mbuf->m_pkthdr.l3hlen);
	}

	mbuf->m_pkthdr.l4hlen = mbuf->m_pkthdr.l3hlen + 4 * th->th_off;
	TSO_MBUF_SEQNUM(mbuf) = ntohl(th->th_seq);

	/* These flags must not be duplicated */
	/*
	 * RST should not be duplicated as well, but FreeBSD kernel
	 * generates TSO packets with RST flag. So, do not assert
	 * its absence.
	 */
	KASSERT(!(th->th_flags & (TH_URG | TH_SYN)),
		("incompatible TCP flag 0x%x on TSO packet",
		 th->th_flags & (TH_URG | TH_SYN)));
	TSO_MBUF_FLAGS(mbuf) = th->th_flags;
}
#endif
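/*
 * Example of the offset packing above: for an untagged IPv4/TCP frame
 * with minimal 20-byte IP and TCP headers, sfxge_parse_tx_packet()
 * leaves l2hlen = 14, l3hlen = 34 and l4hlen = 54. Note these are
 * cumulative offsets from the start of the frame, not individual
 * header lengths, despite the field names.
 */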
/*
 * TX start -- called by the stack.
 */
int
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	int rc;

	sc = (struct sfxge_softc *)ifp->if_softc;

	/*
	 * Transmit may be called when interface is up from the kernel
	 * point of view, but not yet up (in progress) from the driver
	 * point of view. I.e. link aggregation bring up.
	 * Transmit may be called when interface is up from the driver
	 * point of view, but already down from the kernel point of
	 * view. I.e. Rx when interface shutdown is in progress.
	 */
	KASSERT((ifp->if_flags & IFF_UP) || (sc->if_flags & IFF_UP),
		("interface not up"));

	/* Pick the desired transmit queue. */
	if (m->m_pkthdr.csum_flags &
	    (CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO)) {
		int index = 0;

#ifdef RSS
		uint32_t bucket_id;

		/*
		 * Select a TX queue which matches the corresponding
		 * RX queue for the hash in order to assign both
		 * TX and RX parts of the flow to the same CPU
		 */
		if (rss_m2bucket(m, &bucket_id) == 0)
			index = bucket_id %
				(sc->txq_count - (SFXGE_TXQ_NTYPES - 1));
#else
		/* check if flowid is set */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		}
#endif
#if SFXGE_TX_PARSE_EARLY
		if (m->m_pkthdr.csum_flags & CSUM_TSO)
			sfxge_parse_tx_packet(m);
#endif
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);
	if (rc != 0)
		m_freem(m);

	return (rc);
}
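/*
 * Note on the queue selection above: the computed index only chooses
 * among the per-event-queue full-checksum-offload Tx queues, of which
 * SFXGE_TXQ_IP_TCP_UDP_CKSUM is the first, matching the EvQ/label
 * mapping described at the top of this file.
 */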
/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */

struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */
	unsigned segs_space;	/* Remaining number of DMA segments
				   for the packet (FATSOv2 only) */

	/* Input position */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	unsigned seg_size;	/* TCP segment size */
	int fw_assisted;	/* Use FW-assisted TSO */
	u_short packet_id;	/* IPv4 packet ID from the original packet */
	uint8_t tcp_flags;	/* TCP flags */
	efx_desc_t header_desc;	/* Precomputed header descriptor for
				 * FW-assisted TSO */
};

#if !SFXGE_TX_PARSE_EARLY
static const struct ip *tso_iph(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
}

static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
}

static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
{
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
}
#endif


/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define	TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
#define	TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define	TSOH_PAGE_COUNT(_txq_entries)	\
	howmany(TSOH_COUNT(_txq_entries), TSOH_PER_PAGE)
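/*
 * Worked example of the sizing above: with 1024 Tx ring entries and
 * 4 KiB pages, TSOH_COUNT is 512 headers, TSOH_PER_PAGE is
 * 4096 / 128 = 32, so TSOH_PAGE_COUNT comes to 16 preallocated header
 * pages per queue.
 */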
static int tso_init(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
	int i, rc;

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
				  M_SFXGE, M_WAITOK);

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
		if (rc != 0)
			goto fail;
	}

	return (0);

fail:
	while (i-- > 0)
		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;
	return (rc);
}
static void tso_fini(struct sfxge_txq *txq)
{
	int i;

	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
	}
}

static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso,
		      const bus_dma_segment_t *hdr_dma_seg,
		      struct mbuf *mbuf)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp);
#if !SFXGE_TX_PARSE_EARLY
	struct ether_header *eh = mtod(mbuf, struct ether_header *);
	const struct tcphdr *th;
	struct tcphdr th_copy;
#endif

	tso->fw_assisted = txq->tso_fw_assisted;
	tso->mbuf = mbuf;

	/* Find network protocol and header */
#if !SFXGE_TX_PARSE_EARLY
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
	} else {
		tso->nh_off = sizeof(*eh);
	}
#else
	tso->protocol = TSO_MBUF_PROTO(mbuf);
	tso->nh_off = mbuf->m_pkthdr.l2hlen;
	tso->tcph_off = mbuf->m_pkthdr.l3hlen;
	tso->packet_id = TSO_MBUF_PACKETID(mbuf);
#endif

#if !SFXGE_TX_PARSE_EARLY
	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
		tso->packet_id = tso_iph(tso)->ip_id;
	} else {
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
		tso->packet_id = 0;
	}
#endif
mbuf")); 1026b2c43c38SAndrew Rybchenko /* We need TCP header including flags (window is the next) */ 1027b2c43c38SAndrew Rybchenko if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) { 1028b2c43c38SAndrew Rybchenko m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy), 1029b2c43c38SAndrew Rybchenko (caddr_t)&th_copy); 1030b2c43c38SAndrew Rybchenko th = &th_copy; 1031b2c43c38SAndrew Rybchenko } else { 1032b2c43c38SAndrew Rybchenko th = tso_tcph(tso); 1033b2c43c38SAndrew Rybchenko } 1034b2c43c38SAndrew Rybchenko tso->header_len = tso->tcph_off + 4 * th->th_off; 1035a32efb97SAndrew Rybchenko #else 1036a32efb97SAndrew Rybchenko tso->header_len = mbuf->m_pkthdr.l4hlen; 1037a32efb97SAndrew Rybchenko #endif 1038d0f73877SAndrew Rybchenko tso->seg_size = mbuf->m_pkthdr.tso_segsz; 1039e948693eSPhilip Paeps 1040a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY 1041b2c43c38SAndrew Rybchenko tso->seqnum = ntohl(th->th_seq); 1042e948693eSPhilip Paeps 1043e948693eSPhilip Paeps /* These flags must not be duplicated */ 10441217b24eSAndrew Rybchenko /* 10451217b24eSAndrew Rybchenko * RST should not be duplicated as well, but FreeBSD kernel 10461217b24eSAndrew Rybchenko * generates TSO packets with RST flag. So, do not assert 10471217b24eSAndrew Rybchenko * its absence. 10481217b24eSAndrew Rybchenko */ 10491217b24eSAndrew Rybchenko KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), 10501217b24eSAndrew Rybchenko ("incompatible TCP flag 0x%x on TSO packet", 10511217b24eSAndrew Rybchenko th->th_flags & (TH_URG | TH_SYN))); 1052a32efb97SAndrew Rybchenko tso->tcp_flags = th->th_flags; 1053a32efb97SAndrew Rybchenko #else 1054a32efb97SAndrew Rybchenko tso->seqnum = TSO_MBUF_SEQNUM(mbuf); 1055a32efb97SAndrew Rybchenko tso->tcp_flags = TSO_MBUF_FLAGS(mbuf); 1056a32efb97SAndrew Rybchenko #endif 1057e948693eSPhilip Paeps 1058e948693eSPhilip Paeps tso->out_len = mbuf->m_pkthdr.len - tso->header_len; 10593c838a9fSAndrew Rybchenko 10603c838a9fSAndrew Rybchenko if (tso->fw_assisted) { 10613c838a9fSAndrew Rybchenko if (hdr_dma_seg->ds_len >= tso->header_len) 10623c838a9fSAndrew Rybchenko efx_tx_qdesc_dma_create(txq->common, 10633c838a9fSAndrew Rybchenko hdr_dma_seg->ds_addr, 10643c838a9fSAndrew Rybchenko tso->header_len, 10653c838a9fSAndrew Rybchenko B_FALSE, 10663c838a9fSAndrew Rybchenko &tso->header_desc); 10673c838a9fSAndrew Rybchenko else 10683c838a9fSAndrew Rybchenko tso->fw_assisted = 0; 10693c838a9fSAndrew Rybchenko } 1070e948693eSPhilip Paeps } 1071e948693eSPhilip Paeps 1072e948693eSPhilip Paeps /* 1073e948693eSPhilip Paeps * tso_fill_packet_with_fragment - form descriptors for the current fragment 1074e948693eSPhilip Paeps * 1075e948693eSPhilip Paeps * Form descriptors for the current fragment, until we reach the end 1076e948693eSPhilip Paeps * of fragment or end-of-packet. Return 0 on success, 1 if not enough 1077e948693eSPhilip Paeps * space. 
/*
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end
 * of the fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
{
	efx_desc_t *desc;
	int n;
	uint64_t dma_addr = tso->dma_addr;
	boolean_t eop;

	if (tso->in_len == 0 || tso->packet_space == 0)
		return;

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	if (tso->fw_assisted & SFXGE_FATSOV2) {
		n = tso->in_len;
		tso->out_len -= n;
		tso->seqnum += n;
		tso->in_len = 0;
		if (n < tso->packet_space) {
			tso->packet_space -= n;
			tso->segs_space--;
		} else {
			tso->packet_space = tso->seg_size -
			    (n - tso->packet_space) % tso->seg_size;
			tso->segs_space =
			    EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1 -
			    (tso->packet_space != tso->seg_size);
		}
	} else {
		n = min(tso->in_len, tso->packet_space);
		tso->packet_space -= n;
		tso->out_len -= n;
		tso->dma_addr += n;
		tso->in_len -= n;
	}

	/*
	 * It is OK to use binary OR below to avoid extra branching
	 * since all conditions may always be checked.
	 */
	eop = (tso->out_len == 0) | (tso->packet_space == 0) |
	    (tso->segs_space == 0);

	desc = &txq->pend_desc[txq->n_pend_desc++];
	efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc);
}

/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
				int error)
{
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
				     segs->ds_addr : 0);
}
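/*
 * Note on the callback above: a DMA address of zero doubles as the
 * error marker, so tso_start_new_packet() can detect both a load
 * failure and the (unexpected) multi-segment case with one check.
 */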
1142e948693eSPhilip Paeps */ 1143e948693eSPhilip Paeps static int tso_start_new_packet(struct sfxge_txq *txq, 1144e948693eSPhilip Paeps struct sfxge_tso_state *tso, 11453c838a9fSAndrew Rybchenko unsigned int *idp) 1146e948693eSPhilip Paeps { 11473c838a9fSAndrew Rybchenko unsigned int id = *idp; 1148e948693eSPhilip Paeps struct tcphdr *tsoh_th; 1149e948693eSPhilip Paeps unsigned ip_length; 1150e948693eSPhilip Paeps caddr_t header; 1151e948693eSPhilip Paeps uint64_t dma_addr; 1152e948693eSPhilip Paeps bus_dmamap_t map; 11533c838a9fSAndrew Rybchenko efx_desc_t *desc; 1154e948693eSPhilip Paeps int rc; 1155e948693eSPhilip Paeps 11563c838a9fSAndrew Rybchenko if (tso->fw_assisted) { 1157a45a0da1SAndrew Rybchenko if (tso->fw_assisted & SFXGE_FATSOV2) { 1158a45a0da1SAndrew Rybchenko /* Add 2 FATSOv2 option descriptors */ 1159a45a0da1SAndrew Rybchenko desc = &txq->pend_desc[txq->n_pend_desc]; 1160a45a0da1SAndrew Rybchenko efx_tx_qdesc_tso2_create(txq->common, 1161a45a0da1SAndrew Rybchenko tso->packet_id, 1162a45a0da1SAndrew Rybchenko tso->seqnum, 1163a45a0da1SAndrew Rybchenko tso->seg_size, 1164a45a0da1SAndrew Rybchenko desc, 1165a45a0da1SAndrew Rybchenko EFX_TX_FATSOV2_OPT_NDESCS); 1166a45a0da1SAndrew Rybchenko desc += EFX_TX_FATSOV2_OPT_NDESCS; 1167a45a0da1SAndrew Rybchenko txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS; 1168a45a0da1SAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1169a45a0da1SAndrew Rybchenko id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask; 1170a45a0da1SAndrew Rybchenko 1171a45a0da1SAndrew Rybchenko tso->segs_space = 1172a45a0da1SAndrew Rybchenko EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1; 1173a45a0da1SAndrew Rybchenko } else { 1174a32efb97SAndrew Rybchenko uint8_t tcp_flags = tso->tcp_flags; 11753c838a9fSAndrew Rybchenko 11763c838a9fSAndrew Rybchenko if (tso->out_len > tso->seg_size) 11773c838a9fSAndrew Rybchenko tcp_flags &= ~(TH_FIN | TH_PUSH); 11783c838a9fSAndrew Rybchenko 1179a45a0da1SAndrew Rybchenko /* Add FATSOv1 option descriptor */ 11803c838a9fSAndrew Rybchenko desc = &txq->pend_desc[txq->n_pend_desc++]; 11813c838a9fSAndrew Rybchenko efx_tx_qdesc_tso_create(txq->common, 11823c838a9fSAndrew Rybchenko tso->packet_id, 11833c838a9fSAndrew Rybchenko tso->seqnum, 11843c838a9fSAndrew Rybchenko tcp_flags, 11853c838a9fSAndrew Rybchenko desc++); 11863c838a9fSAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 11873c838a9fSAndrew Rybchenko id = (id + 1) & txq->ptr_mask; 11883c838a9fSAndrew Rybchenko 1189a45a0da1SAndrew Rybchenko tso->seqnum += tso->seg_size; 1190a45a0da1SAndrew Rybchenko tso->segs_space = UINT_MAX; 1191a45a0da1SAndrew Rybchenko } 1192a45a0da1SAndrew Rybchenko 11933c838a9fSAndrew Rybchenko /* Header DMA descriptor */ 11943c838a9fSAndrew Rybchenko *desc = tso->header_desc; 11953c838a9fSAndrew Rybchenko txq->n_pend_desc++; 11963c838a9fSAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 11973c838a9fSAndrew Rybchenko id = (id + 1) & txq->ptr_mask; 11983c838a9fSAndrew Rybchenko } else { 1199e948693eSPhilip Paeps /* Allocate a DMA-mapped header buffer. 
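 * Headers of at most TSOH_STD_SIZE bytes come from the pre-mapped TSOH pages (indexed by descriptor id); longer headers take the malloc() and bus_dmamap_load() fallback below.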
*/ 1200e948693eSPhilip Paeps if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) { 1201e948693eSPhilip Paeps unsigned int page_index = (id / 2) / TSOH_PER_PAGE; 1202e948693eSPhilip Paeps unsigned int buf_index = (id / 2) % TSOH_PER_PAGE; 1203e948693eSPhilip Paeps 1204e948693eSPhilip Paeps header = (txq->tsoh_buffer[page_index].esm_base + 1205e948693eSPhilip Paeps buf_index * TSOH_STD_SIZE); 1206e948693eSPhilip Paeps dma_addr = (txq->tsoh_buffer[page_index].esm_addr + 1207e948693eSPhilip Paeps buf_index * TSOH_STD_SIZE); 1208e948693eSPhilip Paeps map = txq->tsoh_buffer[page_index].esm_map; 1209e948693eSPhilip Paeps 12103c838a9fSAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, 12113c838a9fSAndrew Rybchenko ("stmp flags are not 0")); 1212e948693eSPhilip Paeps } else { 12133c838a9fSAndrew Rybchenko struct sfxge_tx_mapping *stmp = &txq->stmp[id]; 12143c838a9fSAndrew Rybchenko 1215e948693eSPhilip Paeps /* We cannot use bus_dmamem_alloc() as that may sleep */ 1216e948693eSPhilip Paeps header = malloc(tso->header_len, M_SFXGE, M_NOWAIT); 1217e948693eSPhilip Paeps if (__predict_false(!header)) 1218b7b0edd1SGeorge V. Neville-Neil return (ENOMEM); 1219e948693eSPhilip Paeps rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map, 1220e948693eSPhilip Paeps header, tso->header_len, 1221e948693eSPhilip Paeps tso_map_long_header, &dma_addr, 1222e948693eSPhilip Paeps BUS_DMA_NOWAIT); 1223e948693eSPhilip Paeps if (__predict_false(dma_addr == 0)) { 1224e948693eSPhilip Paeps if (rc == 0) { 1225e948693eSPhilip Paeps /* Succeeded but got >1 segment */ 1226e948693eSPhilip Paeps bus_dmamap_unload(txq->packet_dma_tag, 1227e948693eSPhilip Paeps stmp->map); 1228e948693eSPhilip Paeps rc = EINVAL; 1229e948693eSPhilip Paeps } 1230e948693eSPhilip Paeps free(header, M_SFXGE); 1231b7b0edd1SGeorge V. Neville-Neil return (rc); 1232e948693eSPhilip Paeps } 1233e948693eSPhilip Paeps map = stmp->map; 1234e948693eSPhilip Paeps 1235e948693eSPhilip Paeps txq->tso_long_headers++; 1236e948693eSPhilip Paeps stmp->u.heap_buf = header; 1237e948693eSPhilip Paeps stmp->flags = TX_BUF_UNMAP; 1238e948693eSPhilip Paeps } 1239e948693eSPhilip Paeps 1240e948693eSPhilip Paeps tsoh_th = (struct tcphdr *)(header + tso->tcph_off); 1241e948693eSPhilip Paeps 1242e948693eSPhilip Paeps /* Copy and update the headers. */ 1243a35485aaSAndrew Rybchenko m_copydata(tso->mbuf, 0, tso->header_len, header); 1244e948693eSPhilip Paeps 1245e948693eSPhilip Paeps tsoh_th->th_seq = htonl(tso->seqnum); 1246d0f73877SAndrew Rybchenko tso->seqnum += tso->seg_size; 1247d0f73877SAndrew Rybchenko if (tso->out_len > tso->seg_size) { 1248e948693eSPhilip Paeps /* This packet will not finish the TSO burst. */ 1249d0f73877SAndrew Rybchenko ip_length = tso->header_len - tso->nh_off + tso->seg_size; 1250e948693eSPhilip Paeps tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH); 1251e948693eSPhilip Paeps } else { 1252e948693eSPhilip Paeps /* This packet will be the last in the TSO burst. */ 1253e948693eSPhilip Paeps ip_length = tso->header_len - tso->nh_off + tso->out_len; 1254e948693eSPhilip Paeps } 1255e948693eSPhilip Paeps 1256e948693eSPhilip Paeps if (tso->protocol == htons(ETHERTYPE_IP)) { 1257e948693eSPhilip Paeps struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off); 1258e948693eSPhilip Paeps tsoh_iph->ip_len = htons(ip_length); 1259e948693eSPhilip Paeps /* XXX We should increment ip_id, but FreeBSD doesn't 1260e948693eSPhilip Paeps * currently allocate extra IDs for multiple segments. 
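 * All segments of the burst therefore share one IP ID; since the ID is only used for reassembly of IP fragments, this is harmless unless the resulting segments are themselves later fragmented.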
1261e948693eSPhilip Paeps */ 1262e948693eSPhilip Paeps } else { 1263e948693eSPhilip Paeps struct ip6_hdr *tsoh_iph = 1264e948693eSPhilip Paeps (struct ip6_hdr *)(header + tso->nh_off); 1265e948693eSPhilip Paeps tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph)); 1266e948693eSPhilip Paeps } 1267e948693eSPhilip Paeps 1268e948693eSPhilip Paeps /* Make the header visible to the hardware. */ 1269e948693eSPhilip Paeps bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE); 1270e948693eSPhilip Paeps 1271e948693eSPhilip Paeps /* Form a descriptor for this header. */ 1272e948693eSPhilip Paeps desc = &txq->pend_desc[txq->n_pend_desc++]; 12733c838a9fSAndrew Rybchenko efx_tx_qdesc_dma_create(txq->common, 12743c838a9fSAndrew Rybchenko dma_addr, 12753c838a9fSAndrew Rybchenko tso->header_len, 12763c838a9fSAndrew Rybchenko 0, 12773c838a9fSAndrew Rybchenko desc); 12783c838a9fSAndrew Rybchenko id = (id + 1) & txq->ptr_mask; 1279a45a0da1SAndrew Rybchenko 1280a45a0da1SAndrew Rybchenko tso->segs_space = UINT_MAX; 12813c838a9fSAndrew Rybchenko } 12823c838a9fSAndrew Rybchenko tso->packet_space = tso->seg_size; 12833c838a9fSAndrew Rybchenko txq->tso_packets++; 12843c838a9fSAndrew Rybchenko *idp = id; 1285e948693eSPhilip Paeps 1286b7b0edd1SGeorge V. Neville-Neil return (0); 1287e948693eSPhilip Paeps } 1288e948693eSPhilip Paeps 1289e948693eSPhilip Paeps static int 1290e948693eSPhilip Paeps sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, 12913c838a9fSAndrew Rybchenko const bus_dma_segment_t *dma_seg, int n_dma_seg, 12923c838a9fSAndrew Rybchenko int vlan_tagged) 1293e948693eSPhilip Paeps { 1294e948693eSPhilip Paeps struct sfxge_tso_state tso; 12953c838a9fSAndrew Rybchenko unsigned int id; 1296a35485aaSAndrew Rybchenko unsigned skipped = 0; 1297e948693eSPhilip Paeps 12983c838a9fSAndrew Rybchenko tso_start(txq, &tso, dma_seg, mbuf); 1299e948693eSPhilip Paeps 1300a35485aaSAndrew Rybchenko while (dma_seg->ds_len + skipped <= tso.header_len) { 1301a35485aaSAndrew Rybchenko skipped += dma_seg->ds_len; 1302e948693eSPhilip Paeps --n_dma_seg; 1303e948693eSPhilip Paeps KASSERT(n_dma_seg, ("no payload found in TSO packet")); 1304e948693eSPhilip Paeps ++dma_seg; 1305e948693eSPhilip Paeps } 1306cfaf34ffSAndrew Rybchenko tso.in_len = dma_seg->ds_len - (tso.header_len - skipped); 1307a35485aaSAndrew Rybchenko tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped); 1308e948693eSPhilip Paeps 13093c838a9fSAndrew Rybchenko id = (txq->added + vlan_tagged) & txq->ptr_mask; 13103c838a9fSAndrew Rybchenko if (__predict_false(tso_start_new_packet(txq, &tso, &id))) 1311385b1d8eSGeorge V. Neville-Neil return (-1); 1312e948693eSPhilip Paeps 1313e948693eSPhilip Paeps while (1) { 1314e948693eSPhilip Paeps tso_fill_packet_with_fragment(txq, &tso); 13153c838a9fSAndrew Rybchenko /* Exactly one DMA descriptor is added */ 13163c838a9fSAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 13173c838a9fSAndrew Rybchenko id = (id + 1) & txq->ptr_mask; 1318e948693eSPhilip Paeps 1319e948693eSPhilip Paeps /* Move onto the next fragment? */ 1320e948693eSPhilip Paeps if (tso.in_len == 0) { 1321e948693eSPhilip Paeps --n_dma_seg; 1322e948693eSPhilip Paeps if (n_dma_seg == 0) 1323e948693eSPhilip Paeps break; 1324e948693eSPhilip Paeps ++dma_seg; 1325e948693eSPhilip Paeps tso.in_len = dma_seg->ds_len; 1326e948693eSPhilip Paeps tso.dma_addr = dma_seg->ds_addr; 1327e948693eSPhilip Paeps } 1328e948693eSPhilip Paeps 1329e948693eSPhilip Paeps /* End of packet? 
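 * packet_space == 0 means the current output segment is full; segs_space == 0 means a FATSOv2 superframe has used up its permitted DMA fetches, so fresh option descriptors are needed before more payload can be queued.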
*/ 1330a45a0da1SAndrew Rybchenko if ((tso.packet_space == 0) | (tso.segs_space == 0)) { 1331a45a0da1SAndrew Rybchenko unsigned int n_fatso_opt_desc = 1332a45a0da1SAndrew Rybchenko (tso.fw_assisted & SFXGE_FATSOV2) ? 1333a45a0da1SAndrew Rybchenko EFX_TX_FATSOV2_OPT_NDESCS : 1334a45a0da1SAndrew Rybchenko (tso.fw_assisted & SFXGE_FATSOV1) ? 1 : 0; 1335a45a0da1SAndrew Rybchenko 1336e948693eSPhilip Paeps /* If the queue is now full due to tiny MSS, 1337e948693eSPhilip Paeps * or we can't create another header, discard 1338e948693eSPhilip Paeps * the remainder of the input mbuf but do not 1339e948693eSPhilip Paeps * roll back the work we have done. 1340e948693eSPhilip Paeps */ 1341a45a0da1SAndrew Rybchenko if (txq->n_pend_desc + n_fatso_opt_desc + 1342a45a0da1SAndrew Rybchenko 1 /* header */ + n_dma_seg > txq->max_pkt_desc) { 1343e1a3d10eSAndrew Rybchenko txq->tso_pdrop_too_many++; 1344e948693eSPhilip Paeps break; 1345e1a3d10eSAndrew Rybchenko } 1346e948693eSPhilip Paeps if (__predict_false(tso_start_new_packet(txq, &tso, 13473c838a9fSAndrew Rybchenko &id))) { 1348e1a3d10eSAndrew Rybchenko txq->tso_pdrop_no_rsrc++; 1349e948693eSPhilip Paeps break; 1350e1a3d10eSAndrew Rybchenko } 1351e948693eSPhilip Paeps } 1352e948693eSPhilip Paeps } 1353e948693eSPhilip Paeps 1354e948693eSPhilip Paeps txq->tso_bursts++; 1355b7b0edd1SGeorge V. Neville-Neil return (id); 1356e948693eSPhilip Paeps } 1357e948693eSPhilip Paeps 1358e948693eSPhilip Paeps static void 1359e948693eSPhilip Paeps sfxge_tx_qunblock(struct sfxge_txq *txq) 1360e948693eSPhilip Paeps { 1361e948693eSPhilip Paeps struct sfxge_softc *sc; 1362e948693eSPhilip Paeps struct sfxge_evq *evq; 1363e948693eSPhilip Paeps 1364e948693eSPhilip Paeps sc = txq->sc; 1365e948693eSPhilip Paeps evq = sc->evq[txq->evq_index]; 1366e948693eSPhilip Paeps 1367763cab71SAndrew Rybchenko SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); 1368e948693eSPhilip Paeps 1369851128b8SAndrew Rybchenko if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) 1370e948693eSPhilip Paeps return; 1371e948693eSPhilip Paeps 1372763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK(txq); 1373e948693eSPhilip Paeps 1374e948693eSPhilip Paeps if (txq->blocked) { 1375e948693eSPhilip Paeps unsigned int level; 1376e948693eSPhilip Paeps 1377e948693eSPhilip Paeps level = txq->added - txq->completed; 13786d73545eSAndrew Rybchenko if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) { 13796d73545eSAndrew Rybchenko /* reaped must be in sync with blocked */ 13806d73545eSAndrew Rybchenko sfxge_tx_qreap(txq); 1381e948693eSPhilip Paeps txq->blocked = 0; 1382e948693eSPhilip Paeps } 13836d73545eSAndrew Rybchenko } 1384e948693eSPhilip Paeps 1385e948693eSPhilip Paeps sfxge_tx_qdpl_service(txq); 1386e948693eSPhilip Paeps /* note: lock has been dropped */ 1387e948693eSPhilip Paeps } 1388e948693eSPhilip Paeps 1389e948693eSPhilip Paeps void 1390e948693eSPhilip Paeps sfxge_tx_qflush_done(struct sfxge_txq *txq) 1391e948693eSPhilip Paeps { 1392e948693eSPhilip Paeps 1393e948693eSPhilip Paeps txq->flush_state = SFXGE_FLUSH_DONE; 1394e948693eSPhilip Paeps } 1395e948693eSPhilip Paeps 1396e948693eSPhilip Paeps static void 1397e948693eSPhilip Paeps sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index) 1398e948693eSPhilip Paeps { 1399e948693eSPhilip Paeps struct sfxge_txq *txq; 1400e948693eSPhilip Paeps struct sfxge_evq *evq; 1401e948693eSPhilip Paeps unsigned int count; 1402e948693eSPhilip Paeps 14033c838a9fSAndrew Rybchenko SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); 14043c838a9fSAndrew Rybchenko 1405e948693eSPhilip Paeps txq = sc->txq[index]; 
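/* Note: both the event queue and Tx queue locks are taken below so that completion processing cannot run concurrently with the stop path. */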
1406e948693eSPhilip Paeps evq = sc->evq[txq->evq_index]; 1407e948693eSPhilip Paeps 14083c838a9fSAndrew Rybchenko SFXGE_EVQ_LOCK(evq); 1409763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK(txq); 1410e948693eSPhilip Paeps 1411e948693eSPhilip Paeps KASSERT(txq->init_state == SFXGE_TXQ_STARTED, 1412e948693eSPhilip Paeps ("txq->init_state != SFXGE_TXQ_STARTED")); 1413e948693eSPhilip Paeps 1414e948693eSPhilip Paeps txq->init_state = SFXGE_TXQ_INITIALIZED; 14153c838a9fSAndrew Rybchenko 14163c838a9fSAndrew Rybchenko if (txq->flush_state != SFXGE_FLUSH_DONE) { 1417e948693eSPhilip Paeps txq->flush_state = SFXGE_FLUSH_PENDING; 1418e948693eSPhilip Paeps 14193c838a9fSAndrew Rybchenko SFXGE_EVQ_UNLOCK(evq); 1420763cab71SAndrew Rybchenko SFXGE_TXQ_UNLOCK(txq); 1421e948693eSPhilip Paeps 14223c838a9fSAndrew Rybchenko /* Flush the transmit queue. */ 14233c838a9fSAndrew Rybchenko if (efx_tx_qflush(txq->common) != 0) { 14243c838a9fSAndrew Rybchenko log(LOG_ERR, "%s: Flushing Tx queue %u failed\n", 14253c838a9fSAndrew Rybchenko device_get_nameunit(sc->dev), index); 14263c838a9fSAndrew Rybchenko txq->flush_state = SFXGE_FLUSH_DONE; 14273c838a9fSAndrew Rybchenko } else { 1428e948693eSPhilip Paeps count = 0; 1429e948693eSPhilip Paeps do { 1430e948693eSPhilip Paeps /* Spin for 100ms. */ 1431e948693eSPhilip Paeps DELAY(100000); 1432e948693eSPhilip Paeps if (txq->flush_state != SFXGE_FLUSH_PENDING) 1433e948693eSPhilip Paeps break; 1434e948693eSPhilip Paeps } while (++count < 20); 14353c838a9fSAndrew Rybchenko } 1436763cab71SAndrew Rybchenko SFXGE_EVQ_LOCK(evq); 1437763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK(txq); 1438e948693eSPhilip Paeps 1439e948693eSPhilip Paeps KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED, 1440e948693eSPhilip Paeps ("txq->flush_state == SFXGE_FLUSH_FAILED")); 1441e948693eSPhilip Paeps 14423c838a9fSAndrew Rybchenko if (txq->flush_state != SFXGE_FLUSH_DONE) { 14433c838a9fSAndrew Rybchenko /* Flush timeout */ 14443c838a9fSAndrew Rybchenko log(LOG_ERR, "%s: Cannot flush Tx queue %u\n", 14453c838a9fSAndrew Rybchenko device_get_nameunit(sc->dev), index); 1446e948693eSPhilip Paeps txq->flush_state = SFXGE_FLUSH_DONE; 14473c838a9fSAndrew Rybchenko } 14483c838a9fSAndrew Rybchenko } 1449e948693eSPhilip Paeps 1450e948693eSPhilip Paeps txq->blocked = 0; 1451e948693eSPhilip Paeps txq->pending = txq->added; 1452e948693eSPhilip Paeps 1453cc933626SAndrew Rybchenko sfxge_tx_qcomplete(txq, evq); 1454e948693eSPhilip Paeps KASSERT(txq->completed == txq->added, 1455e948693eSPhilip Paeps ("txq->completed != txq->added")); 1456e948693eSPhilip Paeps 1457e948693eSPhilip Paeps sfxge_tx_qreap(txq); 1458e948693eSPhilip Paeps KASSERT(txq->reaped == txq->completed, 1459e948693eSPhilip Paeps ("txq->reaped != txq->completed")); 1460e948693eSPhilip Paeps 1461e948693eSPhilip Paeps txq->added = 0; 1462e948693eSPhilip Paeps txq->pending = 0; 1463e948693eSPhilip Paeps txq->completed = 0; 1464e948693eSPhilip Paeps txq->reaped = 0; 1465e948693eSPhilip Paeps 1466e948693eSPhilip Paeps /* Destroy the common code transmit queue. */ 1467e948693eSPhilip Paeps efx_tx_qdestroy(txq->common); 1468e948693eSPhilip Paeps txq->common = NULL; 1469e948693eSPhilip Paeps 1470e948693eSPhilip Paeps efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, 1471385b1d8eSGeorge V. 
Neville-Neil EFX_TXQ_NBUFS(sc->txq_entries)); 1472e948693eSPhilip Paeps 1473763cab71SAndrew Rybchenko SFXGE_EVQ_UNLOCK(evq); 1474763cab71SAndrew Rybchenko SFXGE_TXQ_UNLOCK(txq); 1475e948693eSPhilip Paeps } 1476e948693eSPhilip Paeps 1477a45a0da1SAndrew Rybchenko /* 1478a45a0da1SAndrew Rybchenko * Estimate the maximum number of Tx descriptors required for a TSO packet. 1479a45a0da1SAndrew Rybchenko * With the minimum MSS and maximum mbuf length we might need more (even 1480a45a0da1SAndrew Rybchenko * more than a ring-full of descriptors), but this should not happen in 1481a45a0da1SAndrew Rybchenko * practice except due to a deliberate attack. In that case we will 1482a45a0da1SAndrew Rybchenko * truncate the output at a packet boundary. 1483a45a0da1SAndrew Rybchenko */ 1484a45a0da1SAndrew Rybchenko static unsigned int 1485a45a0da1SAndrew Rybchenko sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type, 1486a45a0da1SAndrew Rybchenko unsigned int tso_fw_assisted) 1487a45a0da1SAndrew Rybchenko { 1488a45a0da1SAndrew Rybchenko /* One descriptor for every input fragment */ 1489a45a0da1SAndrew Rybchenko unsigned int max_descs = SFXGE_TX_MAPPING_MAX_SEG; 1490a45a0da1SAndrew Rybchenko unsigned int sw_tso_max_descs; 1491a45a0da1SAndrew Rybchenko unsigned int fa_tso_v1_max_descs = 0; 1492a45a0da1SAndrew Rybchenko unsigned int fa_tso_v2_max_descs = 0; 1493a45a0da1SAndrew Rybchenko 1494a45a0da1SAndrew Rybchenko /* VLAN tagging Tx option descriptor may be required */ 1495a45a0da1SAndrew Rybchenko if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled) 1496a45a0da1SAndrew Rybchenko max_descs++; 1497a45a0da1SAndrew Rybchenko 1498a45a0da1SAndrew Rybchenko if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) { 1499a45a0da1SAndrew Rybchenko /* 1500a45a0da1SAndrew Rybchenko * Plus header and payload descriptor for each output segment. 1501a45a0da1SAndrew Rybchenko * Minus one since the header fragment is already counted. 1502a45a0da1SAndrew Rybchenko * Even if FATSO is used, we should be ready to fall back 1503a45a0da1SAndrew Rybchenko * to doing it in the driver. 1504a45a0da1SAndrew Rybchenko */ 1505a45a0da1SAndrew Rybchenko sw_tso_max_descs = SFXGE_TSO_MAX_SEGS * 2 - 1; 1506a45a0da1SAndrew Rybchenko 1507a45a0da1SAndrew Rybchenko /* FW-assisted TSOv1 requires one more descriptor per segment 1508a45a0da1SAndrew Rybchenko * than SW TSO */ 1509a45a0da1SAndrew Rybchenko if (tso_fw_assisted & SFXGE_FATSOV1) 1510a45a0da1SAndrew Rybchenko fa_tso_v1_max_descs = 1511a45a0da1SAndrew Rybchenko sw_tso_max_descs + SFXGE_TSO_MAX_SEGS; 1512a45a0da1SAndrew Rybchenko 1513a45a0da1SAndrew Rybchenko /* FW-assisted TSOv2 requires 3 extra descriptors (2 FATSO 1514a45a0da1SAndrew Rybchenko * option descriptors plus a header) per superframe, where 1515a45a0da1SAndrew Rybchenko * superframes are limited by the number of DMA fetches allowed per packet. The first packet header is already counted.
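 * Illustrative arithmetic, assuming SFXGE_TX_MAPPING_MAX_SEG == 40, EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX == 24 and EFX_TX_FATSOV2_OPT_NDESCS == 2: howmany(40, 23) == 2 superframes, each adding 2 + 1 == 3 descriptors; minus the already-counted first header this gives 2 * 3 - 1 == 5 extra descriptors.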
1516a45a0da1SAndrew Rybchenko */ 1517a45a0da1SAndrew Rybchenko if (tso_fw_assisted & SFXGE_FATSOV2) { 1518a45a0da1SAndrew Rybchenko fa_tso_v2_max_descs = 1519a45a0da1SAndrew Rybchenko howmany(SFXGE_TX_MAPPING_MAX_SEG, 1520a45a0da1SAndrew Rybchenko EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1) * 1521a45a0da1SAndrew Rybchenko (EFX_TX_FATSOV2_OPT_NDESCS + 1) - 1; 1522a45a0da1SAndrew Rybchenko } 1523a45a0da1SAndrew Rybchenko 1524a45a0da1SAndrew Rybchenko max_descs += MAX(sw_tso_max_descs, 1525a45a0da1SAndrew Rybchenko MAX(fa_tso_v1_max_descs, fa_tso_v2_max_descs)); 1526a45a0da1SAndrew Rybchenko } 1527a45a0da1SAndrew Rybchenko 1528a45a0da1SAndrew Rybchenko return (max_descs); 1529a45a0da1SAndrew Rybchenko } 1530a45a0da1SAndrew Rybchenko 1531e948693eSPhilip Paeps static int 1532e948693eSPhilip Paeps sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index) 1533e948693eSPhilip Paeps { 1534e948693eSPhilip Paeps struct sfxge_txq *txq; 1535e948693eSPhilip Paeps efsys_mem_t *esmp; 1536e948693eSPhilip Paeps uint16_t flags; 1537a45a0da1SAndrew Rybchenko unsigned int tso_fw_assisted; 1538e948693eSPhilip Paeps struct sfxge_evq *evq; 15393c838a9fSAndrew Rybchenko unsigned int desc_index; 1540e948693eSPhilip Paeps int rc; 1541e948693eSPhilip Paeps 15423c838a9fSAndrew Rybchenko SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); 15433c838a9fSAndrew Rybchenko 1544e948693eSPhilip Paeps txq = sc->txq[index]; 1545e948693eSPhilip Paeps esmp = &txq->mem; 1546e948693eSPhilip Paeps evq = sc->evq[txq->evq_index]; 1547e948693eSPhilip Paeps 1548e948693eSPhilip Paeps KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, 1549e948693eSPhilip Paeps ("txq->init_state != SFXGE_TXQ_INITIALIZED")); 1550e948693eSPhilip Paeps KASSERT(evq->init_state == SFXGE_EVQ_STARTED, 1551e948693eSPhilip Paeps ("evq->init_state != SFXGE_EVQ_STARTED")); 1552e948693eSPhilip Paeps 1553e948693eSPhilip Paeps /* Program the buffer table. */ 1554e948693eSPhilip Paeps if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp, 1555385b1d8eSGeorge V. Neville-Neil EFX_TXQ_NBUFS(sc->txq_entries))) != 0) 1556385b1d8eSGeorge V. Neville-Neil return (rc); 1557e948693eSPhilip Paeps 1558e948693eSPhilip Paeps /* Determine the kind of queue we are creating. */ 1559a45a0da1SAndrew Rybchenko tso_fw_assisted = 0; 1560e948693eSPhilip Paeps switch (txq->type) { 1561e948693eSPhilip Paeps case SFXGE_TXQ_NON_CKSUM: 1562e948693eSPhilip Paeps flags = 0; 1563e948693eSPhilip Paeps break; 1564e948693eSPhilip Paeps case SFXGE_TXQ_IP_CKSUM: 15659dd0e15fSAndrew Rybchenko flags = EFX_TXQ_CKSUM_IPV4; 1566e948693eSPhilip Paeps break; 1567e948693eSPhilip Paeps case SFXGE_TXQ_IP_TCP_UDP_CKSUM: 15689dd0e15fSAndrew Rybchenko flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP; 1569a45a0da1SAndrew Rybchenko tso_fw_assisted = sc->tso_fw_assisted; 1570a45a0da1SAndrew Rybchenko if (tso_fw_assisted & SFXGE_FATSOV2) 1571a45a0da1SAndrew Rybchenko flags |= EFX_TXQ_FATSOV2; 1572e948693eSPhilip Paeps break; 1573e948693eSPhilip Paeps default: 1574e948693eSPhilip Paeps KASSERT(0, ("Impossible TX queue")); 1575e948693eSPhilip Paeps flags = 0; 1576e948693eSPhilip Paeps break; 1577e948693eSPhilip Paeps } 1578e948693eSPhilip Paeps 1579e948693eSPhilip Paeps /* Create the common code transmit queue. */ 1580cf07c70dSGeorge V. Neville-Neil if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp, 1581385b1d8eSGeorge V. 
Neville-Neil sc->txq_entries, txq->buf_base_id, flags, evq->common, 1582a45a0da1SAndrew Rybchenko &txq->common, &desc_index)) != 0) { 1583a45a0da1SAndrew Rybchenko /* Retry if no FATSOv2 resources, otherwise fail */ 1584a45a0da1SAndrew Rybchenko if ((rc != ENOSPC) || (~flags & EFX_TXQ_FATSOV2)) 1585a45a0da1SAndrew Rybchenko goto fail; 1586a45a0da1SAndrew Rybchenko 1587a45a0da1SAndrew Rybchenko /* Looks like all FATSOv2 contexts are used */ 1588a45a0da1SAndrew Rybchenko flags &= ~EFX_TXQ_FATSOV2; 1589a45a0da1SAndrew Rybchenko tso_fw_assisted &= ~SFXGE_FATSOV2; 1590a45a0da1SAndrew Rybchenko if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp, 1591a45a0da1SAndrew Rybchenko sc->txq_entries, txq->buf_base_id, flags, evq->common, 15923c838a9fSAndrew Rybchenko &txq->common, &desc_index)) != 0) 1593e948693eSPhilip Paeps goto fail; 1594a45a0da1SAndrew Rybchenko } 1595e948693eSPhilip Paeps 15963c838a9fSAndrew Rybchenko /* Initialise queue descriptor indexes */ 15973c838a9fSAndrew Rybchenko txq->added = txq->pending = txq->completed = txq->reaped = desc_index; 15983c838a9fSAndrew Rybchenko 1599763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK(txq); 1600e948693eSPhilip Paeps 1601e948693eSPhilip Paeps /* Enable the transmit queue. */ 1602e948693eSPhilip Paeps efx_tx_qenable(txq->common); 1603e948693eSPhilip Paeps 1604e948693eSPhilip Paeps txq->init_state = SFXGE_TXQ_STARTED; 16053c838a9fSAndrew Rybchenko txq->flush_state = SFXGE_FLUSH_REQUIRED; 1606a45a0da1SAndrew Rybchenko txq->tso_fw_assisted = tso_fw_assisted; 1607a45a0da1SAndrew Rybchenko 1608a45a0da1SAndrew Rybchenko txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type, 1609a45a0da1SAndrew Rybchenko tso_fw_assisted); 1610e948693eSPhilip Paeps 1611763cab71SAndrew Rybchenko SFXGE_TXQ_UNLOCK(txq); 1612e948693eSPhilip Paeps 1613e948693eSPhilip Paeps return (0); 1614e948693eSPhilip Paeps 1615e948693eSPhilip Paeps fail: 1616e948693eSPhilip Paeps efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, 1617385b1d8eSGeorge V. Neville-Neil EFX_TXQ_NBUFS(sc->txq_entries)); 1618385b1d8eSGeorge V. Neville-Neil return (rc); 1619e948693eSPhilip Paeps } 1620e948693eSPhilip Paeps 1621e948693eSPhilip Paeps void 1622e948693eSPhilip Paeps sfxge_tx_stop(struct sfxge_softc *sc) 1623e948693eSPhilip Paeps { 1624e948693eSPhilip Paeps int index; 1625e948693eSPhilip Paeps 1626e2b05fe2SAndrew Rybchenko index = sc->txq_count; 1627e948693eSPhilip Paeps while (--index >= 0) 1628e2b05fe2SAndrew Rybchenko sfxge_tx_qstop(sc, index); 1629e948693eSPhilip Paeps 1630e948693eSPhilip Paeps /* Tear down the transmit module */ 1631e948693eSPhilip Paeps efx_tx_fini(sc->enp); 1632e948693eSPhilip Paeps } 1633e948693eSPhilip Paeps 1634e948693eSPhilip Paeps int 1635e948693eSPhilip Paeps sfxge_tx_start(struct sfxge_softc *sc) 1636e948693eSPhilip Paeps { 1637e948693eSPhilip Paeps int index; 1638e948693eSPhilip Paeps int rc; 1639e948693eSPhilip Paeps 1640e948693eSPhilip Paeps /* Initialize the common code transmit module. 
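 * If any queue then fails to start, the fail path below stops the queues already started and finalizes the transmit module again, so the caller sees all-or-nothing semantics.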
*/ 1641e948693eSPhilip Paeps if ((rc = efx_tx_init(sc->enp)) != 0) 1642e948693eSPhilip Paeps return (rc); 1643e948693eSPhilip Paeps 1644e2b05fe2SAndrew Rybchenko for (index = 0; index < sc->txq_count; index++) { 1645e2b05fe2SAndrew Rybchenko if ((rc = sfxge_tx_qstart(sc, index)) != 0) 1646e948693eSPhilip Paeps goto fail; 1647e948693eSPhilip Paeps } 1648e948693eSPhilip Paeps 1649e948693eSPhilip Paeps return (0); 1650e948693eSPhilip Paeps 1651e948693eSPhilip Paeps fail: 1652e2b05fe2SAndrew Rybchenko while (--index >= 0) 1653e2b05fe2SAndrew Rybchenko sfxge_tx_qstop(sc, index); 1654e2b05fe2SAndrew Rybchenko 1655e948693eSPhilip Paeps efx_tx_fini(sc->enp); 1656e948693eSPhilip Paeps 1657e948693eSPhilip Paeps return (rc); 1658e948693eSPhilip Paeps } 1659e948693eSPhilip Paeps 1660f6222d7bSAndrew Rybchenko static int 1661f6222d7bSAndrew Rybchenko sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node) 1662f6222d7bSAndrew Rybchenko { 1663f6222d7bSAndrew Rybchenko struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev); 1664f6222d7bSAndrew Rybchenko struct sysctl_oid *stat_node; 1665f6222d7bSAndrew Rybchenko unsigned int id; 1666f6222d7bSAndrew Rybchenko 1667f6222d7bSAndrew Rybchenko stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, 1668f6222d7bSAndrew Rybchenko "stats", CTLFLAG_RD, NULL, 1669f6222d7bSAndrew Rybchenko "Tx queue statistics"); 1670f6222d7bSAndrew Rybchenko if (stat_node == NULL) 1671f6222d7bSAndrew Rybchenko return (ENOMEM); 1672f6222d7bSAndrew Rybchenko 1673f6222d7bSAndrew Rybchenko for (id = 0; id < nitems(sfxge_tx_stats); id++) { 1674f6222d7bSAndrew Rybchenko SYSCTL_ADD_ULONG( 1675f6222d7bSAndrew Rybchenko ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO, 1676f6222d7bSAndrew Rybchenko sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS, 1677f6222d7bSAndrew Rybchenko (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset), 1678f6222d7bSAndrew Rybchenko ""); 1679f6222d7bSAndrew Rybchenko } 1680f6222d7bSAndrew Rybchenko 1681f6222d7bSAndrew Rybchenko return (0); 1682f6222d7bSAndrew Rybchenko } 1683f6222d7bSAndrew Rybchenko 1684e948693eSPhilip Paeps /** 1685e948693eSPhilip Paeps * Destroy a transmit queue. 1686e948693eSPhilip Paeps */ 1687e948693eSPhilip Paeps static void 1688e948693eSPhilip Paeps sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index) 1689e948693eSPhilip Paeps { 1690e948693eSPhilip Paeps struct sfxge_txq *txq; 1691385b1d8eSGeorge V. Neville-Neil unsigned int nmaps; 1692e948693eSPhilip Paeps 1693e948693eSPhilip Paeps txq = sc->txq[index]; 1694e948693eSPhilip Paeps 1695e948693eSPhilip Paeps KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, 1696e948693eSPhilip Paeps ("txq->init_state != SFXGE_TXQ_INITIALIZED")); 1697e948693eSPhilip Paeps 1698e948693eSPhilip Paeps if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) 1699e948693eSPhilip Paeps tso_fini(txq); 1700e948693eSPhilip Paeps 1701e948693eSPhilip Paeps /* Free the context arrays. */ 1702e948693eSPhilip Paeps free(txq->pend_desc, M_SFXGE); 1703385b1d8eSGeorge V. Neville-Neil nmaps = sc->txq_entries; 1704b7b0edd1SGeorge V. Neville-Neil while (nmaps-- != 0) 1705e948693eSPhilip Paeps bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); 1706e948693eSPhilip Paeps free(txq->stmp, M_SFXGE); 1707e948693eSPhilip Paeps 1708e948693eSPhilip Paeps /* Release DMA memory mapping. 
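 * This frees the descriptor ring memory allocated by sfxge_dma_alloc() in sfxge_tx_qinit().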
*/ 1709e948693eSPhilip Paeps sfxge_dma_free(&txq->mem); 1710e948693eSPhilip Paeps 1711e948693eSPhilip Paeps sc->txq[index] = NULL; 1712e948693eSPhilip Paeps 1713763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK_DESTROY(txq); 1714e948693eSPhilip Paeps 1715e948693eSPhilip Paeps free(txq, M_SFXGE); 1716e948693eSPhilip Paeps } 1717e948693eSPhilip Paeps 1718e948693eSPhilip Paeps static int 1719e948693eSPhilip Paeps sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index, 1720e948693eSPhilip Paeps enum sfxge_txq_type type, unsigned int evq_index) 1721e948693eSPhilip Paeps { 1722bc85c897SGeorge V. Neville-Neil char name[16]; 172395caaf0fSAndrew Rybchenko struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); 1724bc85c897SGeorge V. Neville-Neil struct sysctl_oid *txq_node; 1725e948693eSPhilip Paeps struct sfxge_txq *txq; 1726e948693eSPhilip Paeps struct sfxge_evq *evq; 1727e948693eSPhilip Paeps struct sfxge_tx_dpl *stdp; 172895caaf0fSAndrew Rybchenko struct sysctl_oid *dpl_node; 1729e948693eSPhilip Paeps efsys_mem_t *esmp; 1730e948693eSPhilip Paeps unsigned int nmaps; 1731e948693eSPhilip Paeps int rc; 1732e948693eSPhilip Paeps 1733e948693eSPhilip Paeps txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK); 1734e948693eSPhilip Paeps txq->sc = sc; 1735385b1d8eSGeorge V. Neville-Neil txq->entries = sc->txq_entries; 1736385b1d8eSGeorge V. Neville-Neil txq->ptr_mask = txq->entries - 1; 1737e948693eSPhilip Paeps 1738e948693eSPhilip Paeps sc->txq[txq_index] = txq; 1739e948693eSPhilip Paeps esmp = &txq->mem; 1740e948693eSPhilip Paeps 1741e948693eSPhilip Paeps evq = sc->evq[evq_index]; 1742e948693eSPhilip Paeps 1743e948693eSPhilip Paeps /* Allocate and zero DMA space for the descriptor ring. */ 1744385b1d8eSGeorge V. Neville-Neil if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0) 1745e948693eSPhilip Paeps return (rc); 1746e948693eSPhilip Paeps 1747e948693eSPhilip Paeps /* Allocate buffer table entries. */ 1748385b1d8eSGeorge V. Neville-Neil sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries), 1749e948693eSPhilip Paeps &txq->buf_base_id); 1750e948693eSPhilip Paeps 1751e948693eSPhilip Paeps /* Create a DMA tag for packet mappings. */ 1752fb8ccc78SMarius Strobl if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000, 1753fb8ccc78SMarius Strobl MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL, 1754fb8ccc78SMarius Strobl NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL, 1755e948693eSPhilip Paeps &txq->packet_dma_tag) != 0) { 1756e948693eSPhilip Paeps device_printf(sc->dev, "Couldn't allocate txq DMA tag\n"); 1757e948693eSPhilip Paeps rc = ENOMEM; 1758e948693eSPhilip Paeps goto fail; 1759e948693eSPhilip Paeps } 1760e948693eSPhilip Paeps 1761e948693eSPhilip Paeps /* Allocate pending descriptor array for batching writes. */ 17623c838a9fSAndrew Rybchenko txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries, 1763e948693eSPhilip Paeps M_SFXGE, M_ZERO | M_WAITOK); 1764e948693eSPhilip Paeps 1765e948693eSPhilip Paeps /* Allocate and initialise mbuf DMA mapping array. */ 1766385b1d8eSGeorge V. Neville-Neil txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries, 1767e948693eSPhilip Paeps M_SFXGE, M_ZERO | M_WAITOK); 1768385b1d8eSGeorge V. 
Neville-Neil for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) { 1769e948693eSPhilip Paeps rc = bus_dmamap_create(txq->packet_dma_tag, 0, 1770e948693eSPhilip Paeps &txq->stmp[nmaps].map); 1771e948693eSPhilip Paeps if (rc != 0) 1772e948693eSPhilip Paeps goto fail2; 1773e948693eSPhilip Paeps } 1774e948693eSPhilip Paeps 1775bc85c897SGeorge V. Neville-Neil snprintf(name, sizeof(name), "%u", txq_index); 177695caaf0fSAndrew Rybchenko txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node), 1777bc85c897SGeorge V. Neville-Neil OID_AUTO, name, CTLFLAG_RD, NULL, ""); 1778bc85c897SGeorge V. Neville-Neil if (txq_node == NULL) { 1779bc85c897SGeorge V. Neville-Neil rc = ENOMEM; 1780bc85c897SGeorge V. Neville-Neil goto fail_txq_node; 1781bc85c897SGeorge V. Neville-Neil } 1782bc85c897SGeorge V. Neville-Neil 1783e948693eSPhilip Paeps if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM && 1784e948693eSPhilip Paeps (rc = tso_init(txq)) != 0) 1785e948693eSPhilip Paeps goto fail3; 1786e948693eSPhilip Paeps 1787060a95efSGeorge V. Neville-Neil if (sfxge_tx_dpl_get_max <= 0) { 1788060a95efSGeorge V. Neville-Neil log(LOG_ERR, "%s=%d must be greater than 0", 1789060a95efSGeorge V. Neville-Neil SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max); 1790060a95efSGeorge V. Neville-Neil rc = EINVAL; 1791060a95efSGeorge V. Neville-Neil goto fail_tx_dpl_get_max; 1792060a95efSGeorge V. Neville-Neil } 179393929f25SAndrew Rybchenko if (sfxge_tx_dpl_get_non_tcp_max <= 0) { 179493929f25SAndrew Rybchenko log(LOG_ERR, "%s=%d must be greater than 0", 179593929f25SAndrew Rybchenko SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, 179693929f25SAndrew Rybchenko sfxge_tx_dpl_get_non_tcp_max); 179793929f25SAndrew Rybchenko rc = EINVAL; 179893929f25SAndrew Rybchenko goto fail_tx_dpl_get_max; 179993929f25SAndrew Rybchenko } 1800060a95efSGeorge V. Neville-Neil if (sfxge_tx_dpl_put_max < 0) { 1801060a95efSGeorge V. Neville-Neil log(LOG_ERR, "%s=%d must be greater or equal to 0", 1802060a95efSGeorge V. Neville-Neil SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max); 1803060a95efSGeorge V. Neville-Neil rc = EINVAL; 1804060a95efSGeorge V. Neville-Neil goto fail_tx_dpl_put_max; 1805060a95efSGeorge V. Neville-Neil } 1806060a95efSGeorge V. Neville-Neil 1807e948693eSPhilip Paeps /* Initialize the deferred packet list. */ 1808e948693eSPhilip Paeps stdp = &txq->dpl; 1809060a95efSGeorge V. Neville-Neil stdp->std_put_max = sfxge_tx_dpl_put_max; 1810060a95efSGeorge V. Neville-Neil stdp->std_get_max = sfxge_tx_dpl_get_max; 181193929f25SAndrew Rybchenko stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max; 1812e948693eSPhilip Paeps stdp->std_getp = &stdp->std_get; 1813e948693eSPhilip Paeps 181433d45dc5SAndrew Rybchenko SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index); 1815bc85c897SGeorge V. Neville-Neil 181695caaf0fSAndrew Rybchenko dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, 181795caaf0fSAndrew Rybchenko "dpl", CTLFLAG_RD, NULL, 181895caaf0fSAndrew Rybchenko "Deferred packet list statistics"); 181995caaf0fSAndrew Rybchenko if (dpl_node == NULL) { 182095caaf0fSAndrew Rybchenko rc = ENOMEM; 182195caaf0fSAndrew Rybchenko goto fail_dpl_node; 182295caaf0fSAndrew Rybchenko } 182395caaf0fSAndrew Rybchenko 182495caaf0fSAndrew Rybchenko SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 182595caaf0fSAndrew Rybchenko "get_count", CTLFLAG_RD | CTLFLAG_STATS, 1826bc85c897SGeorge V. 
Neville-Neil &stdp->std_get_count, 0, ""); 182795caaf0fSAndrew Rybchenko SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 182895caaf0fSAndrew Rybchenko "get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS, 182993929f25SAndrew Rybchenko &stdp->std_get_non_tcp_count, 0, ""); 183095caaf0fSAndrew Rybchenko SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 183195caaf0fSAndrew Rybchenko "get_hiwat", CTLFLAG_RD | CTLFLAG_STATS, 183293929f25SAndrew Rybchenko &stdp->std_get_hiwat, 0, ""); 183395caaf0fSAndrew Rybchenko SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 183495caaf0fSAndrew Rybchenko "put_hiwat", CTLFLAG_RD | CTLFLAG_STATS, 1835bce6d281SAndrew Rybchenko &stdp->std_put_hiwat, 0, ""); 1836e948693eSPhilip Paeps 1837f6222d7bSAndrew Rybchenko rc = sfxge_txq_stat_init(txq, txq_node); 1838f6222d7bSAndrew Rybchenko if (rc != 0) 1839f6222d7bSAndrew Rybchenko goto fail_txq_stat_init; 1840f6222d7bSAndrew Rybchenko 1841e948693eSPhilip Paeps txq->type = type; 1842e948693eSPhilip Paeps txq->evq_index = evq_index; 1843e948693eSPhilip Paeps txq->txq_index = txq_index; 1844e948693eSPhilip Paeps txq->init_state = SFXGE_TXQ_INITIALIZED; 18453c838a9fSAndrew Rybchenko txq->hw_vlan_tci = 0; 18463c838a9fSAndrew Rybchenko 1847e948693eSPhilip Paeps return (0); 1848e948693eSPhilip Paeps 1849f6222d7bSAndrew Rybchenko fail_txq_stat_init: 185095caaf0fSAndrew Rybchenko fail_dpl_node: 1851060a95efSGeorge V. Neville-Neil fail_tx_dpl_put_max: 1852060a95efSGeorge V. Neville-Neil fail_tx_dpl_get_max: 1853e948693eSPhilip Paeps fail3: 1854bc85c897SGeorge V. Neville-Neil fail_txq_node: 1855e948693eSPhilip Paeps free(txq->pend_desc, M_SFXGE); 1856e948693eSPhilip Paeps fail2: 1857b7b0edd1SGeorge V. Neville-Neil while (nmaps-- != 0) 1858e948693eSPhilip Paeps bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); 1859e948693eSPhilip Paeps free(txq->stmp, M_SFXGE); 1860e948693eSPhilip Paeps bus_dma_tag_destroy(txq->packet_dma_tag); 1861e948693eSPhilip Paeps 1862e948693eSPhilip Paeps fail: 1863e948693eSPhilip Paeps sfxge_dma_free(esmp); 1864e948693eSPhilip Paeps 1865e948693eSPhilip Paeps return (rc); 1866e948693eSPhilip Paeps } 1867e948693eSPhilip Paeps 1868e948693eSPhilip Paeps static int 1869e948693eSPhilip Paeps sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS) 1870e948693eSPhilip Paeps { 1871e948693eSPhilip Paeps struct sfxge_softc *sc = arg1; 1872e948693eSPhilip Paeps unsigned int id = arg2; 1873e948693eSPhilip Paeps unsigned long sum; 1874e948693eSPhilip Paeps unsigned int index; 1875e948693eSPhilip Paeps 1876e948693eSPhilip Paeps /* Sum across all TX queues */ 1877e948693eSPhilip Paeps sum = 0; 1878e2b05fe2SAndrew Rybchenko for (index = 0; index < sc->txq_count; index++) 1879e948693eSPhilip Paeps sum += *(unsigned long *)((caddr_t)sc->txq[index] + 1880e948693eSPhilip Paeps sfxge_tx_stats[id].offset); 1881e948693eSPhilip Paeps 1882b7b0edd1SGeorge V. 
Neville-Neil return (SYSCTL_OUT(req, &sum, sizeof(sum))); 1883e948693eSPhilip Paeps } 1884e948693eSPhilip Paeps 1885e948693eSPhilip Paeps static void 1886e948693eSPhilip Paeps sfxge_tx_stat_init(struct sfxge_softc *sc) 1887e948693eSPhilip Paeps { 1888e948693eSPhilip Paeps struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); 1889e948693eSPhilip Paeps struct sysctl_oid_list *stat_list; 1890e948693eSPhilip Paeps unsigned int id; 1891e948693eSPhilip Paeps 1892e948693eSPhilip Paeps stat_list = SYSCTL_CHILDREN(sc->stats_node); 1893e948693eSPhilip Paeps 1894612d8e28SAndrew Rybchenko for (id = 0; id < nitems(sfxge_tx_stats); id++) { 1895e948693eSPhilip Paeps SYSCTL_ADD_PROC( 1896e948693eSPhilip Paeps ctx, stat_list, 1897e948693eSPhilip Paeps OID_AUTO, sfxge_tx_stats[id].name, 1898e948693eSPhilip Paeps CTLTYPE_ULONG|CTLFLAG_RD, 1899e948693eSPhilip Paeps sc, id, sfxge_tx_stat_handler, "LU", 1900e948693eSPhilip Paeps ""); 1901e948693eSPhilip Paeps } 1902e948693eSPhilip Paeps } 1903e948693eSPhilip Paeps 19043d8fce27SAndrew Rybchenko uint64_t 19053d8fce27SAndrew Rybchenko sfxge_tx_get_drops(struct sfxge_softc *sc) 19063d8fce27SAndrew Rybchenko { 19073d8fce27SAndrew Rybchenko unsigned int index; 19083d8fce27SAndrew Rybchenko uint64_t drops = 0; 19093d8fce27SAndrew Rybchenko struct sfxge_txq *txq; 19103d8fce27SAndrew Rybchenko 19113d8fce27SAndrew Rybchenko /* Sum across all TX queues */ 19123d8fce27SAndrew Rybchenko for (index = 0; index < sc->txq_count; index++) { 19133d8fce27SAndrew Rybchenko txq = sc->txq[index]; 19143d8fce27SAndrew Rybchenko /* 19153d8fce27SAndrew Rybchenko * In theory, txq->put_overflow and txq->netdown_drops 19163d8fce27SAndrew Rybchenko * should be updated with atomic operations and the other 19173d8fce27SAndrew Rybchenko * counters should be read under the txq lock, but this is just statistics.
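 * A reader may therefore observe slightly stale or torn values; callers treat the returned drop count as approximate.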
19183d8fce27SAndrew Rybchenko */ 19193d8fce27SAndrew Rybchenko drops += txq->drops + txq->get_overflow + 19203d8fce27SAndrew Rybchenko txq->get_non_tcp_overflow + 19213d8fce27SAndrew Rybchenko txq->put_overflow + txq->netdown_drops + 19223d8fce27SAndrew Rybchenko txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc; 19233d8fce27SAndrew Rybchenko } 19243d8fce27SAndrew Rybchenko return (drops); 19253d8fce27SAndrew Rybchenko } 19263d8fce27SAndrew Rybchenko 1927e948693eSPhilip Paeps void 1928e948693eSPhilip Paeps sfxge_tx_fini(struct sfxge_softc *sc) 1929e948693eSPhilip Paeps { 1930e948693eSPhilip Paeps int index; 1931e948693eSPhilip Paeps 1932e2b05fe2SAndrew Rybchenko index = sc->txq_count; 1933e948693eSPhilip Paeps while (--index >= 0) 1934e2b05fe2SAndrew Rybchenko sfxge_tx_qfini(sc, index); 1935e948693eSPhilip Paeps 1936e2b05fe2SAndrew Rybchenko sc->txq_count = 0; 1937e948693eSPhilip Paeps } 1938e948693eSPhilip Paeps 1939e948693eSPhilip Paeps 1940e948693eSPhilip Paeps int 1941e948693eSPhilip Paeps sfxge_tx_init(struct sfxge_softc *sc) 1942e948693eSPhilip Paeps { 19433c838a9fSAndrew Rybchenko const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); 1944e948693eSPhilip Paeps struct sfxge_intr *intr; 1945e948693eSPhilip Paeps int index; 1946e948693eSPhilip Paeps int rc; 1947e948693eSPhilip Paeps 1948e948693eSPhilip Paeps intr = &sc->intr; 1949e948693eSPhilip Paeps 1950e948693eSPhilip Paeps KASSERT(intr->state == SFXGE_INTR_INITIALIZED, 1951e948693eSPhilip Paeps ("intr->state != SFXGE_INTR_INITIALIZED")); 1952e948693eSPhilip Paeps 1953e2b05fe2SAndrew Rybchenko sc->txq_count = SFXGE_TXQ_NTYPES - 1 + sc->intr.n_alloc; 1954e2b05fe2SAndrew Rybchenko 19553c838a9fSAndrew Rybchenko sc->tso_fw_assisted = sfxge_tso_fw_assisted; 1956a45a0da1SAndrew Rybchenko if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) || 1957a45a0da1SAndrew Rybchenko (!encp->enc_fw_assisted_tso_enabled)) 1958a45a0da1SAndrew Rybchenko sc->tso_fw_assisted &= ~SFXGE_FATSOV1; 1959a45a0da1SAndrew Rybchenko if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO_V2) || 1960a45a0da1SAndrew Rybchenko (!encp->enc_fw_assisted_tso_v2_enabled)) 1961a45a0da1SAndrew Rybchenko sc->tso_fw_assisted &= ~SFXGE_FATSOV2; 19623c838a9fSAndrew Rybchenko 1963bc85c897SGeorge V. Neville-Neil sc->txqs_node = SYSCTL_ADD_NODE( 1964bc85c897SGeorge V. Neville-Neil device_get_sysctl_ctx(sc->dev), 1965bc85c897SGeorge V. Neville-Neil SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), 1966bc85c897SGeorge V. Neville-Neil OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues"); 1967bc85c897SGeorge V. Neville-Neil if (sc->txqs_node == NULL) { 1968bc85c897SGeorge V. Neville-Neil rc = ENOMEM; 1969bc85c897SGeorge V. Neville-Neil goto fail_txq_node; 1970bc85c897SGeorge V. Neville-Neil } 1971bc85c897SGeorge V. 
Neville-Neil 1972e948693eSPhilip Paeps /* Initialize the transmit queues */ 1973e948693eSPhilip Paeps if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM, 1974e948693eSPhilip Paeps SFXGE_TXQ_NON_CKSUM, 0)) != 0) 1975e948693eSPhilip Paeps goto fail; 1976e948693eSPhilip Paeps 1977e948693eSPhilip Paeps if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM, 1978e948693eSPhilip Paeps SFXGE_TXQ_IP_CKSUM, 0)) != 0) 1979e948693eSPhilip Paeps goto fail2; 1980e948693eSPhilip Paeps 1981e2b05fe2SAndrew Rybchenko for (index = 0; 1982e2b05fe2SAndrew Rybchenko index < sc->txq_count - SFXGE_TXQ_NTYPES + 1; 1983e2b05fe2SAndrew Rybchenko index++) { 1984e2b05fe2SAndrew Rybchenko if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NTYPES - 1 + index, 1985e948693eSPhilip Paeps SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0) 1986e948693eSPhilip Paeps goto fail3; 1987e948693eSPhilip Paeps } 1988e948693eSPhilip Paeps 1989e948693eSPhilip Paeps sfxge_tx_stat_init(sc); 1990e948693eSPhilip Paeps 1991e948693eSPhilip Paeps return (0); 1992e948693eSPhilip Paeps 1993e948693eSPhilip Paeps fail3: 1994e948693eSPhilip Paeps while (--index >= 0) 1995e948693eSPhilip Paeps sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); 1996e948693eSPhilip Paeps 1997e2b05fe2SAndrew Rybchenko sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM); 1998e2b05fe2SAndrew Rybchenko 1999e948693eSPhilip Paeps fail2: 2000e948693eSPhilip Paeps sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM); 2001e948693eSPhilip Paeps 2002e948693eSPhilip Paeps fail: 2003bc85c897SGeorge V. Neville-Neil fail_txq_node: 2004e2b05fe2SAndrew Rybchenko sc->txq_count = 0; 2005e948693eSPhilip Paeps return (rc); 2006e948693eSPhilip Paeps } 2007
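/* Queue layout example (illustrative, assuming SFXGE_TXQ_NTYPES == 3 and sc->intr.n_alloc == 4): txq_count = 3 - 1 + 4 = 6; txq[0] is the no-checksum queue, txq[1] the IP-checksum queue, and txq[2..5] are TCP/UDP-checksum queues bound to event queues 0..3, matching the mapping described in the theory of operation at the top of the file. */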