xref: /freebsd/sys/dev/sfxge/sfxge_tx.c (revision 685dc743)
1e948693eSPhilip Paeps /*-
24d846d26SWarner Losh  * SPDX-License-Identifier: BSD-2-Clause
3718cf2ccSPedro F. Giffuni  *
4929c7febSAndrew Rybchenko  * Copyright (c) 2010-2016 Solarflare Communications Inc.
5e948693eSPhilip Paeps  * All rights reserved.
6e948693eSPhilip Paeps  *
7e948693eSPhilip Paeps  * This software was developed in part by Philip Paeps under contract for
8e948693eSPhilip Paeps  * Solarflare Communications, Inc.
9e948693eSPhilip Paeps  *
10e948693eSPhilip Paeps  * Redistribution and use in source and binary forms, with or without
113c838a9fSAndrew Rybchenko  * modification, are permitted provided that the following conditions are met:
12e948693eSPhilip Paeps  *
133c838a9fSAndrew Rybchenko  * 1. Redistributions of source code must retain the above copyright notice,
143c838a9fSAndrew Rybchenko  *    this list of conditions and the following disclaimer.
153c838a9fSAndrew Rybchenko  * 2. Redistributions in binary form must reproduce the above copyright notice,
163c838a9fSAndrew Rybchenko  *    this list of conditions and the following disclaimer in the documentation
173c838a9fSAndrew Rybchenko  *    and/or other materials provided with the distribution.
183c838a9fSAndrew Rybchenko  *
193c838a9fSAndrew Rybchenko  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
203c838a9fSAndrew Rybchenko  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
213c838a9fSAndrew Rybchenko  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
223c838a9fSAndrew Rybchenko  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
233c838a9fSAndrew Rybchenko  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
243c838a9fSAndrew Rybchenko  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
253c838a9fSAndrew Rybchenko  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
263c838a9fSAndrew Rybchenko  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
273c838a9fSAndrew Rybchenko  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
283c838a9fSAndrew Rybchenko  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
293c838a9fSAndrew Rybchenko  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
303c838a9fSAndrew Rybchenko  *
313c838a9fSAndrew Rybchenko  * The views and conclusions contained in the software and documentation are
323c838a9fSAndrew Rybchenko  * those of the authors and should not be interpreted as representing official
333c838a9fSAndrew Rybchenko  * policies, either expressed or implied, of the FreeBSD Project.
34e948693eSPhilip Paeps  */
35e948693eSPhilip Paeps 
36cf07c70dSGeorge V. Neville-Neil /* Theory of operation:
37cf07c70dSGeorge V. Neville-Neil  *
388b447157SAndrew Rybchenko  * Tx queues allocation and mapping on Siena
39cf07c70dSGeorge V. Neville-Neil  *
40cf07c70dSGeorge V. Neville-Neil  * One Tx queue with enabled checksum offload is allocated per Rx channel
41cf07c70dSGeorge V. Neville-Neil  * (event queue).  Also 2 Tx queues (one without checksum offload and one
42cf07c70dSGeorge V. Neville-Neil  * with IP checksum offload only) are allocated and bound to event queue 0.
43cf07c70dSGeorge V. Neville-Neil  * sfxge_txq_type is used as Tx queue label.
44cf07c70dSGeorge V. Neville-Neil  *
45cf07c70dSGeorge V. Neville-Neil  * So, event queue plus label mapping to Tx queue index is:
46cf07c70dSGeorge V. Neville-Neil  *	if event queue index is 0, TxQ-index = TxQ-label * [0..SFXGE_TXQ_NTYPES)
47cf07c70dSGeorge V. Neville-Neil  *	else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
48cf07c70dSGeorge V. Neville-Neil  * See sfxge_get_txq_by_label() sfxge_ev.c
498b447157SAndrew Rybchenko  *
508b447157SAndrew Rybchenko  * Tx queue allocation and mapping on EF10
518b447157SAndrew Rybchenko  *
528b447157SAndrew Rybchenko  * One Tx queue with enabled checksum offload is allocated per Rx
538b447157SAndrew Rybchenko  * channel (event queue). Checksum offload on all Tx queues is enabled or
548b447157SAndrew Rybchenko  * disabled dynamically by inserting option descriptors, so the additional
558b447157SAndrew Rybchenko  * queues used on Siena are not required.
568b447157SAndrew Rybchenko  *
578b447157SAndrew Rybchenko  * TxQ label is always set to zero on EF10 hardware.
588b447157SAndrew Rybchenko  * So, event queue to Tx queue mapping is simple:
598b447157SAndrew Rybchenko  * TxQ-index = EvQ-index
60cf07c70dSGeorge V. Neville-Neil  */
61cf07c70dSGeorge V. Neville-Neil 
62e948693eSPhilip Paeps #include <sys/cdefs.h>
633bbc1e08SAndrew Rybchenko #include "opt_rss.h"
643bbc1e08SAndrew Rybchenko 
658ec07310SGleb Smirnoff #include <sys/param.h>
668ec07310SGleb Smirnoff #include <sys/malloc.h>
67e948693eSPhilip Paeps #include <sys/mbuf.h>
68e948693eSPhilip Paeps #include <sys/smp.h>
69e948693eSPhilip Paeps #include <sys/socket.h>
70e948693eSPhilip Paeps #include <sys/sysctl.h>
71060a95efSGeorge V. Neville-Neil #include <sys/syslog.h>
72a45a0da1SAndrew Rybchenko #include <sys/limits.h>
73e948693eSPhilip Paeps 
74e948693eSPhilip Paeps #include <net/bpf.h>
75e948693eSPhilip Paeps #include <net/ethernet.h>
76e948693eSPhilip Paeps #include <net/if.h>
77e948693eSPhilip Paeps #include <net/if_vlan_var.h>
78e948693eSPhilip Paeps 
79e948693eSPhilip Paeps #include <netinet/in.h>
80e948693eSPhilip Paeps #include <netinet/ip.h>
81e948693eSPhilip Paeps #include <netinet/ip6.h>
82e948693eSPhilip Paeps #include <netinet/tcp.h>
83e948693eSPhilip Paeps 
843bbc1e08SAndrew Rybchenko #ifdef RSS
853bbc1e08SAndrew Rybchenko #include <net/rss_config.h>
863bbc1e08SAndrew Rybchenko #endif
873bbc1e08SAndrew Rybchenko 
88e948693eSPhilip Paeps #include "common/efx.h"
89e948693eSPhilip Paeps 
90e948693eSPhilip Paeps #include "sfxge.h"
91e948693eSPhilip Paeps #include "sfxge_tx.h"
92e948693eSPhilip Paeps 
93060a95efSGeorge V. Neville-Neil #define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
94060a95efSGeorge V. Neville-Neil static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
95060a95efSGeorge V. Neville-Neil TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
96060a95efSGeorge V. Neville-Neil SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
97060a95efSGeorge V. Neville-Neil 	   &sfxge_tx_dpl_get_max, 0,
9893929f25SAndrew Rybchenko 	   "Maximum number of any packets in deferred packet get-list");
9993929f25SAndrew Rybchenko 
10093929f25SAndrew Rybchenko #define	SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \
10193929f25SAndrew Rybchenko 	SFXGE_PARAM(tx_dpl_get_non_tcp_max)
10293929f25SAndrew Rybchenko static int sfxge_tx_dpl_get_non_tcp_max =
10393929f25SAndrew Rybchenko 	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT;
10493929f25SAndrew Rybchenko TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max);
10593929f25SAndrew Rybchenko SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN,
10693929f25SAndrew Rybchenko 	   &sfxge_tx_dpl_get_non_tcp_max, 0,
10793929f25SAndrew Rybchenko 	   "Maximum number of non-TCP packets in deferred packet get-list");
108060a95efSGeorge V. Neville-Neil 
109060a95efSGeorge V. Neville-Neil #define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
110060a95efSGeorge V. Neville-Neil static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
111060a95efSGeorge V. Neville-Neil TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
112060a95efSGeorge V. Neville-Neil SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
113060a95efSGeorge V. Neville-Neil 	   &sfxge_tx_dpl_put_max, 0,
11493929f25SAndrew Rybchenko 	   "Maximum number of any packets in deferred packet put-list");
115060a95efSGeorge V. Neville-Neil 
1163c838a9fSAndrew Rybchenko #define	SFXGE_PARAM_TSO_FW_ASSISTED	SFXGE_PARAM(tso_fw_assisted)
117a45a0da1SAndrew Rybchenko static int sfxge_tso_fw_assisted = (SFXGE_FATSOV1 | SFXGE_FATSOV2);
1183c838a9fSAndrew Rybchenko TUNABLE_INT(SFXGE_PARAM_TSO_FW_ASSISTED, &sfxge_tso_fw_assisted);
1193c838a9fSAndrew Rybchenko SYSCTL_INT(_hw_sfxge, OID_AUTO, tso_fw_assisted, CTLFLAG_RDTUN,
1203c838a9fSAndrew Rybchenko 	   &sfxge_tso_fw_assisted, 0,
121a45a0da1SAndrew Rybchenko 	   "Bitmask of FW-assisted TSO allowed to use if supported by NIC firmware");
1223c838a9fSAndrew Rybchenko 
123f6222d7bSAndrew Rybchenko static const struct {
124f6222d7bSAndrew Rybchenko 	const char *name;
125f6222d7bSAndrew Rybchenko 	size_t offset;
126f6222d7bSAndrew Rybchenko } sfxge_tx_stats[] = {
127f6222d7bSAndrew Rybchenko #define	SFXGE_TX_STAT(name, member) \
128f6222d7bSAndrew Rybchenko 	{ #name, offsetof(struct sfxge_txq, member) }
129f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tso_bursts, tso_bursts),
130f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tso_packets, tso_packets),
131f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
132f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
133f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
134f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tx_collapses, collapses),
135f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tx_drops, drops),
136f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tx_get_overflow, get_overflow),
137f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow),
138f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tx_put_overflow, put_overflow),
139f6222d7bSAndrew Rybchenko 	SFXGE_TX_STAT(tx_netdown_drops, netdown_drops),
140f6222d7bSAndrew Rybchenko };
141f6222d7bSAndrew Rybchenko 
142e948693eSPhilip Paeps /* Forward declarations. */
1430b28bbdcSAndrew Rybchenko static void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
144e948693eSPhilip Paeps static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
145e948693eSPhilip Paeps static void sfxge_tx_qunblock(struct sfxge_txq *txq);
146e948693eSPhilip Paeps static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
1473c838a9fSAndrew Rybchenko 			      const bus_dma_segment_t *dma_seg, int n_dma_seg,
1488b447157SAndrew Rybchenko 			      int n_extra_descs);
1493c838a9fSAndrew Rybchenko 
1503c838a9fSAndrew Rybchenko static inline void
sfxge_next_stmp(struct sfxge_txq * txq,struct sfxge_tx_mapping ** pstmp)1513c838a9fSAndrew Rybchenko sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp)
1523c838a9fSAndrew Rybchenko {
1533c838a9fSAndrew Rybchenko 	KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0"));
1543c838a9fSAndrew Rybchenko 	if (__predict_false(*pstmp ==
1553c838a9fSAndrew Rybchenko 			    &txq->stmp[txq->ptr_mask]))
1563c838a9fSAndrew Rybchenko 		*pstmp = &txq->stmp[0];
1573c838a9fSAndrew Rybchenko 	else
1583c838a9fSAndrew Rybchenko 		(*pstmp)++;
1593c838a9fSAndrew Rybchenko }
1603c838a9fSAndrew Rybchenko 
1618b447157SAndrew Rybchenko static int
sfxge_tx_maybe_toggle_cksum_offload(struct sfxge_txq * txq,struct mbuf * mbuf,struct sfxge_tx_mapping ** pstmp)1628b447157SAndrew Rybchenko sfxge_tx_maybe_toggle_cksum_offload(struct sfxge_txq *txq, struct mbuf *mbuf,
1638b447157SAndrew Rybchenko 				    struct sfxge_tx_mapping **pstmp)
1648b447157SAndrew Rybchenko {
1658b447157SAndrew Rybchenko 	uint16_t new_hw_cksum_flags;
1668b447157SAndrew Rybchenko 	efx_desc_t *desc;
1678b447157SAndrew Rybchenko 
1688b447157SAndrew Rybchenko 	if (mbuf->m_pkthdr.csum_flags &
1698b447157SAndrew Rybchenko 	    (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6 | CSUM_TSO)) {
1708b447157SAndrew Rybchenko 		/*
1718b447157SAndrew Rybchenko 		 * We always set EFX_TXQ_CKSUM_IPV4 here because this
1728b447157SAndrew Rybchenko 		 * configuration is the most useful, and this won't
1738b447157SAndrew Rybchenko 		 * cause any trouble in case of IPv6 traffic anyway.
1748b447157SAndrew Rybchenko 		 */
1758b447157SAndrew Rybchenko 		new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
1768b447157SAndrew Rybchenko 	} else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
1778b447157SAndrew Rybchenko 		new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4;
1788b447157SAndrew Rybchenko 	} else {
1798b447157SAndrew Rybchenko 		new_hw_cksum_flags = 0;
1808b447157SAndrew Rybchenko 	}
1818b447157SAndrew Rybchenko 
1828b447157SAndrew Rybchenko 	if (new_hw_cksum_flags == txq->hw_cksum_flags)
1838b447157SAndrew Rybchenko 		return (0);
1848b447157SAndrew Rybchenko 
1858b447157SAndrew Rybchenko 	desc = &txq->pend_desc[txq->n_pend_desc];
1868b447157SAndrew Rybchenko 	efx_tx_qdesc_checksum_create(txq->common, new_hw_cksum_flags, desc);
1878b447157SAndrew Rybchenko 	txq->hw_cksum_flags = new_hw_cksum_flags;
1888b447157SAndrew Rybchenko 	txq->n_pend_desc++;
1898b447157SAndrew Rybchenko 
1908b447157SAndrew Rybchenko 	sfxge_next_stmp(txq, pstmp);
1918b447157SAndrew Rybchenko 
1928b447157SAndrew Rybchenko 	return (1);
1938b447157SAndrew Rybchenko }
1948b447157SAndrew Rybchenko 
1958b447157SAndrew Rybchenko static int
sfxge_tx_maybe_insert_tag(struct sfxge_txq * txq,struct mbuf * mbuf,struct sfxge_tx_mapping ** pstmp)1968b447157SAndrew Rybchenko sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf,
1978b447157SAndrew Rybchenko 			  struct sfxge_tx_mapping **pstmp)
1988b447157SAndrew Rybchenko {
1998b447157SAndrew Rybchenko 	uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ?
2008b447157SAndrew Rybchenko 			     mbuf->m_pkthdr.ether_vtag :
2018b447157SAndrew Rybchenko 			     0);
2028b447157SAndrew Rybchenko 	efx_desc_t *desc;
2038b447157SAndrew Rybchenko 
2048b447157SAndrew Rybchenko 	if (this_tag == txq->hw_vlan_tci)
2058b447157SAndrew Rybchenko 		return (0);
2068b447157SAndrew Rybchenko 
2078b447157SAndrew Rybchenko 	desc = &txq->pend_desc[txq->n_pend_desc];
2088b447157SAndrew Rybchenko 	efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), desc);
2098b447157SAndrew Rybchenko 	txq->hw_vlan_tci = this_tag;
2108b447157SAndrew Rybchenko 	txq->n_pend_desc++;
2118b447157SAndrew Rybchenko 
2128b447157SAndrew Rybchenko 	sfxge_next_stmp(txq, pstmp);
2138b447157SAndrew Rybchenko 
2148b447157SAndrew Rybchenko 	return (1);
2158b447157SAndrew Rybchenko }
216e948693eSPhilip Paeps 
217e948693eSPhilip Paeps void
sfxge_tx_qcomplete(struct sfxge_txq * txq,struct sfxge_evq * evq)218cc933626SAndrew Rybchenko sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
219e948693eSPhilip Paeps {
220e948693eSPhilip Paeps 	unsigned int completed;
221e948693eSPhilip Paeps 
222763cab71SAndrew Rybchenko 	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
223e948693eSPhilip Paeps 
224e948693eSPhilip Paeps 	completed = txq->completed;
225e948693eSPhilip Paeps 	while (completed != txq->pending) {
226e948693eSPhilip Paeps 		struct sfxge_tx_mapping *stmp;
227e948693eSPhilip Paeps 		unsigned int id;
228e948693eSPhilip Paeps 
229385b1d8eSGeorge V. Neville-Neil 		id = completed++ & txq->ptr_mask;
230e948693eSPhilip Paeps 
231e948693eSPhilip Paeps 		stmp = &txq->stmp[id];
232e948693eSPhilip Paeps 		if (stmp->flags & TX_BUF_UNMAP) {
233e948693eSPhilip Paeps 			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
234e948693eSPhilip Paeps 			if (stmp->flags & TX_BUF_MBUF) {
235e948693eSPhilip Paeps 				struct mbuf *m = stmp->u.mbuf;
236e948693eSPhilip Paeps 				do
237e948693eSPhilip Paeps 					m = m_free(m);
238e948693eSPhilip Paeps 				while (m != NULL);
239e948693eSPhilip Paeps 			} else {
240e948693eSPhilip Paeps 				free(stmp->u.heap_buf, M_SFXGE);
241e948693eSPhilip Paeps 			}
242e948693eSPhilip Paeps 			stmp->flags = 0;
243e948693eSPhilip Paeps 		}
244e948693eSPhilip Paeps 	}
245e948693eSPhilip Paeps 	txq->completed = completed;
246e948693eSPhilip Paeps 
247e948693eSPhilip Paeps 	/* Check whether we need to unblock the queue. */
248e948693eSPhilip Paeps 	mb();
249e948693eSPhilip Paeps 	if (txq->blocked) {
250e948693eSPhilip Paeps 		unsigned int level;
251e948693eSPhilip Paeps 
252e948693eSPhilip Paeps 		level = txq->added - txq->completed;
253385b1d8eSGeorge V. Neville-Neil 		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
254e948693eSPhilip Paeps 			sfxge_tx_qunblock(txq);
255e948693eSPhilip Paeps 	}
256e948693eSPhilip Paeps }
257e948693eSPhilip Paeps 
2580b28bbdcSAndrew Rybchenko static unsigned int
sfxge_is_mbuf_non_tcp(struct mbuf * mbuf)25993929f25SAndrew Rybchenko sfxge_is_mbuf_non_tcp(struct mbuf *mbuf)
26093929f25SAndrew Rybchenko {
261453130d9SPedro F. Giffuni 	/* Absence of TCP checksum flags does not mean that it is non-TCP
26293929f25SAndrew Rybchenko 	 * but it should be true if user wants to achieve high throughput.
26393929f25SAndrew Rybchenko 	 */
26493929f25SAndrew Rybchenko 	return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)));
26593929f25SAndrew Rybchenko }
26693929f25SAndrew Rybchenko 
267e948693eSPhilip Paeps /*
268e948693eSPhilip Paeps  * Reorder the put list and append it to the get list.
269e948693eSPhilip Paeps  */
270e948693eSPhilip Paeps static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq * txq)271e948693eSPhilip Paeps sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
272e948693eSPhilip Paeps {
273e948693eSPhilip Paeps 	struct sfxge_tx_dpl *stdp;
274e948693eSPhilip Paeps 	struct mbuf *mbuf, *get_next, **get_tailp;
275e948693eSPhilip Paeps 	volatile uintptr_t *putp;
276e948693eSPhilip Paeps 	uintptr_t put;
277e948693eSPhilip Paeps 	unsigned int count;
27893929f25SAndrew Rybchenko 	unsigned int non_tcp_count;
279e948693eSPhilip Paeps 
280763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
281e948693eSPhilip Paeps 
282e948693eSPhilip Paeps 	stdp = &txq->dpl;
283e948693eSPhilip Paeps 
284e948693eSPhilip Paeps 	/* Acquire the put list. */
285e948693eSPhilip Paeps 	putp = &stdp->std_put;
286fb8ccc78SMarius Strobl 	put = atomic_readandclear_ptr(putp);
287e948693eSPhilip Paeps 	mbuf = (void *)put;
288e948693eSPhilip Paeps 
289e948693eSPhilip Paeps 	if (mbuf == NULL)
290e948693eSPhilip Paeps 		return;
291e948693eSPhilip Paeps 
292e948693eSPhilip Paeps 	/* Reverse the put list. */
293e948693eSPhilip Paeps 	get_tailp = &mbuf->m_nextpkt;
294e948693eSPhilip Paeps 	get_next = NULL;
295e948693eSPhilip Paeps 
296e948693eSPhilip Paeps 	count = 0;
29793929f25SAndrew Rybchenko 	non_tcp_count = 0;
298e948693eSPhilip Paeps 	do {
299e948693eSPhilip Paeps 		struct mbuf *put_next;
300e948693eSPhilip Paeps 
30193929f25SAndrew Rybchenko 		non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf);
302e948693eSPhilip Paeps 		put_next = mbuf->m_nextpkt;
303e948693eSPhilip Paeps 		mbuf->m_nextpkt = get_next;
304e948693eSPhilip Paeps 		get_next = mbuf;
305e948693eSPhilip Paeps 		mbuf = put_next;
306e948693eSPhilip Paeps 
307e948693eSPhilip Paeps 		count++;
308e948693eSPhilip Paeps 	} while (mbuf != NULL);
309e948693eSPhilip Paeps 
310bce6d281SAndrew Rybchenko 	if (count > stdp->std_put_hiwat)
311bce6d281SAndrew Rybchenko 		stdp->std_put_hiwat = count;
312bce6d281SAndrew Rybchenko 
313e948693eSPhilip Paeps 	/* Append the reversed put list to the get list. */
314e948693eSPhilip Paeps 	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
315e948693eSPhilip Paeps 	*stdp->std_getp = get_next;
316e948693eSPhilip Paeps 	stdp->std_getp = get_tailp;
317bc85c897SGeorge V. Neville-Neil 	stdp->std_get_count += count;
31893929f25SAndrew Rybchenko 	stdp->std_get_non_tcp_count += non_tcp_count;
319e948693eSPhilip Paeps }
320e948693eSPhilip Paeps 
321e948693eSPhilip Paeps static void
sfxge_tx_qreap(struct sfxge_txq * txq)322e948693eSPhilip Paeps sfxge_tx_qreap(struct sfxge_txq *txq)
323e948693eSPhilip Paeps {
324763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
325e948693eSPhilip Paeps 
326e948693eSPhilip Paeps 	txq->reaped = txq->completed;
327e948693eSPhilip Paeps }
328e948693eSPhilip Paeps 
329e948693eSPhilip Paeps static void
sfxge_tx_qlist_post(struct sfxge_txq * txq)330e948693eSPhilip Paeps sfxge_tx_qlist_post(struct sfxge_txq *txq)
331e948693eSPhilip Paeps {
332e565fa55SJohn Baldwin 	unsigned int old_added __diagused;
3333c838a9fSAndrew Rybchenko 	unsigned int block_level;
334e948693eSPhilip Paeps 	unsigned int level;
335e565fa55SJohn Baldwin 	int rc __diagused;
336e948693eSPhilip Paeps 
337763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
338e948693eSPhilip Paeps 
339e948693eSPhilip Paeps 	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
3403c838a9fSAndrew Rybchenko 	KASSERT(txq->n_pend_desc <= txq->max_pkt_desc,
341e948693eSPhilip Paeps 		("txq->n_pend_desc too large"));
342e948693eSPhilip Paeps 	KASSERT(!txq->blocked, ("txq->blocked"));
343e948693eSPhilip Paeps 
344e948693eSPhilip Paeps 	old_added = txq->added;
345e948693eSPhilip Paeps 
346e948693eSPhilip Paeps 	/* Post the fragment list. */
3473c838a9fSAndrew Rybchenko 	rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc,
348e948693eSPhilip Paeps 			  txq->reaped, &txq->added);
3493c838a9fSAndrew Rybchenko 	KASSERT(rc == 0, ("efx_tx_qdesc_post() failed"));
350e948693eSPhilip Paeps 
3513c838a9fSAndrew Rybchenko 	/* If efx_tx_qdesc_post() had to refragment, our information about
352e948693eSPhilip Paeps 	 * buffers to free may be associated with the wrong
353e948693eSPhilip Paeps 	 * descriptors.
354e948693eSPhilip Paeps 	 */
355e948693eSPhilip Paeps 	KASSERT(txq->added - old_added == txq->n_pend_desc,
3563c838a9fSAndrew Rybchenko 		("efx_tx_qdesc_post() refragmented descriptors"));
357e948693eSPhilip Paeps 
358e948693eSPhilip Paeps 	level = txq->added - txq->reaped;
359385b1d8eSGeorge V. Neville-Neil 	KASSERT(level <= txq->entries, ("overfilled TX queue"));
360e948693eSPhilip Paeps 
361e948693eSPhilip Paeps 	/* Clear the fragment list. */
362e948693eSPhilip Paeps 	txq->n_pend_desc = 0;
363e948693eSPhilip Paeps 
3643c838a9fSAndrew Rybchenko 	/*
3653c838a9fSAndrew Rybchenko 	 * Set the block level to ensure there is space to generate a
3663c838a9fSAndrew Rybchenko 	 * large number of descriptors for TSO.
3673c838a9fSAndrew Rybchenko 	 */
3683c838a9fSAndrew Rybchenko 	block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc;
3693c838a9fSAndrew Rybchenko 
370e948693eSPhilip Paeps 	/* Have we reached the block level? */
3713c838a9fSAndrew Rybchenko 	if (level < block_level)
372e948693eSPhilip Paeps 		return;
373e948693eSPhilip Paeps 
374e948693eSPhilip Paeps 	/* Reap, and check again */
375e948693eSPhilip Paeps 	sfxge_tx_qreap(txq);
376e948693eSPhilip Paeps 	level = txq->added - txq->reaped;
3773c838a9fSAndrew Rybchenko 	if (level < block_level)
378e948693eSPhilip Paeps 		return;
379e948693eSPhilip Paeps 
380e948693eSPhilip Paeps 	txq->blocked = 1;
381e948693eSPhilip Paeps 
382e948693eSPhilip Paeps 	/*
383e948693eSPhilip Paeps 	 * Avoid a race with completion interrupt handling that could leave
384e948693eSPhilip Paeps 	 * the queue blocked.
385e948693eSPhilip Paeps 	 */
386e948693eSPhilip Paeps 	mb();
387e948693eSPhilip Paeps 	sfxge_tx_qreap(txq);
388e948693eSPhilip Paeps 	level = txq->added - txq->reaped;
3893c838a9fSAndrew Rybchenko 	if (level < block_level) {
390e948693eSPhilip Paeps 		mb();
391e948693eSPhilip Paeps 		txq->blocked = 0;
392e948693eSPhilip Paeps 	}
393e948693eSPhilip Paeps }
394e948693eSPhilip Paeps 
sfxge_tx_queue_mbuf(struct sfxge_txq * txq,struct mbuf * mbuf)395e948693eSPhilip Paeps static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
396e948693eSPhilip Paeps {
397e948693eSPhilip Paeps 	bus_dmamap_t *used_map;
398e948693eSPhilip Paeps 	bus_dmamap_t map;
399e948693eSPhilip Paeps 	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
400e948693eSPhilip Paeps 	unsigned int id;
401e948693eSPhilip Paeps 	struct sfxge_tx_mapping *stmp;
4023c838a9fSAndrew Rybchenko 	efx_desc_t *desc;
403e948693eSPhilip Paeps 	int n_dma_seg;
404e948693eSPhilip Paeps 	int rc;
405e948693eSPhilip Paeps 	int i;
4063c838a9fSAndrew Rybchenko 	int eop;
4078b447157SAndrew Rybchenko 	uint16_t hw_cksum_flags_prev;
408ad72d030SAndrew Rybchenko 	uint16_t hw_vlan_tci_prev;
4098b447157SAndrew Rybchenko 	int n_extra_descs;
410e948693eSPhilip Paeps 
411e948693eSPhilip Paeps 	KASSERT(!txq->blocked, ("txq->blocked"));
412e948693eSPhilip Paeps 
413e3ef7bb2SAndrew Rybchenko #if SFXGE_TX_PARSE_EARLY
414e3ef7bb2SAndrew Rybchenko 	/*
415e3ef7bb2SAndrew Rybchenko 	 * If software TSO is used, we still need to copy packet header,
416e3ef7bb2SAndrew Rybchenko 	 * even if we have already parsed it early before enqueue.
417e3ef7bb2SAndrew Rybchenko 	 */
418e3ef7bb2SAndrew Rybchenko 	if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) &&
419e3ef7bb2SAndrew Rybchenko 	    (txq->tso_fw_assisted == 0))
420e3ef7bb2SAndrew Rybchenko 		prefetch_read_many(mbuf->m_data);
421e3ef7bb2SAndrew Rybchenko #else
422e3ef7bb2SAndrew Rybchenko 	/*
423e3ef7bb2SAndrew Rybchenko 	 * Prefetch packet header since we need to parse it and extract
424e3ef7bb2SAndrew Rybchenko 	 * IP ID, TCP sequence number and flags.
425e3ef7bb2SAndrew Rybchenko 	 */
426e948693eSPhilip Paeps 	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
427e948693eSPhilip Paeps 		prefetch_read_many(mbuf->m_data);
428e3ef7bb2SAndrew Rybchenko #endif
429e948693eSPhilip Paeps 
430851128b8SAndrew Rybchenko 	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) {
431e948693eSPhilip Paeps 		rc = EINTR;
432e948693eSPhilip Paeps 		goto reject;
433e948693eSPhilip Paeps 	}
434e948693eSPhilip Paeps 
435e948693eSPhilip Paeps 	/* Load the packet for DMA. */
436385b1d8eSGeorge V. Neville-Neil 	id = txq->added & txq->ptr_mask;
437e948693eSPhilip Paeps 	stmp = &txq->stmp[id];
438e948693eSPhilip Paeps 	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
439e948693eSPhilip Paeps 				     mbuf, dma_seg, &n_dma_seg, 0);
440e948693eSPhilip Paeps 	if (rc == EFBIG) {
441e948693eSPhilip Paeps 		/* Try again. */
442c6499eccSGleb Smirnoff 		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
443e948693eSPhilip Paeps 						   SFXGE_TX_MAPPING_MAX_SEG);
444e948693eSPhilip Paeps 		if (new_mbuf == NULL)
445e948693eSPhilip Paeps 			goto reject;
446e948693eSPhilip Paeps 		++txq->collapses;
447e948693eSPhilip Paeps 		mbuf = new_mbuf;
448e948693eSPhilip Paeps 		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
449e948693eSPhilip Paeps 					     stmp->map, mbuf,
450e948693eSPhilip Paeps 					     dma_seg, &n_dma_seg, 0);
451e948693eSPhilip Paeps 	}
452e948693eSPhilip Paeps 	if (rc != 0)
453e948693eSPhilip Paeps 		goto reject;
454e948693eSPhilip Paeps 
455e948693eSPhilip Paeps 	/* Make the packet visible to the hardware. */
456e948693eSPhilip Paeps 	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);
457e948693eSPhilip Paeps 
458e948693eSPhilip Paeps 	used_map = &stmp->map;
459e948693eSPhilip Paeps 
4608b447157SAndrew Rybchenko 	hw_cksum_flags_prev = txq->hw_cksum_flags;
461ad72d030SAndrew Rybchenko 	hw_vlan_tci_prev = txq->hw_vlan_tci;
462ad72d030SAndrew Rybchenko 
4638b447157SAndrew Rybchenko 	/*
4648b447157SAndrew Rybchenko 	 * The order of option descriptors, which are used to leverage VLAN tag
4658b447157SAndrew Rybchenko 	 * and checksum offloads, might be important. Changing checksum offload
4668b447157SAndrew Rybchenko 	 * between VLAN option and packet descriptors probably does not work.
4678b447157SAndrew Rybchenko 	 */
4688b447157SAndrew Rybchenko 	n_extra_descs = sfxge_tx_maybe_toggle_cksum_offload(txq, mbuf, &stmp);
4698b447157SAndrew Rybchenko 	n_extra_descs += sfxge_tx_maybe_insert_tag(txq, mbuf, &stmp);
4708b447157SAndrew Rybchenko 
471e948693eSPhilip Paeps 	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
4728b447157SAndrew Rybchenko 		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg,
4738b447157SAndrew Rybchenko 					n_extra_descs);
474e948693eSPhilip Paeps 		if (rc < 0)
475e948693eSPhilip Paeps 			goto reject_mapped;
4763c838a9fSAndrew Rybchenko 		stmp = &txq->stmp[(rc - 1) & txq->ptr_mask];
477e948693eSPhilip Paeps 	} else {
478e948693eSPhilip Paeps 		/* Add the mapping to the fragment list, and set flags
479e948693eSPhilip Paeps 		 * for the buffer.
480e948693eSPhilip Paeps 		 */
4813c838a9fSAndrew Rybchenko 
482e948693eSPhilip Paeps 		i = 0;
483e948693eSPhilip Paeps 		for (;;) {
4848b447157SAndrew Rybchenko 			desc = &txq->pend_desc[i + n_extra_descs];
4853c838a9fSAndrew Rybchenko 			eop = (i == n_dma_seg - 1);
4863c838a9fSAndrew Rybchenko 			efx_tx_qdesc_dma_create(txq->common,
4873c838a9fSAndrew Rybchenko 						dma_seg[i].ds_addr,
4883c838a9fSAndrew Rybchenko 						dma_seg[i].ds_len,
4893c838a9fSAndrew Rybchenko 						eop,
4903c838a9fSAndrew Rybchenko 						desc);
4913c838a9fSAndrew Rybchenko 			if (eop)
492e948693eSPhilip Paeps 				break;
493e948693eSPhilip Paeps 			i++;
4943c838a9fSAndrew Rybchenko 			sfxge_next_stmp(txq, &stmp);
495e948693eSPhilip Paeps 		}
4968b447157SAndrew Rybchenko 		txq->n_pend_desc = n_dma_seg + n_extra_descs;
497e948693eSPhilip Paeps 	}
498e948693eSPhilip Paeps 
499e948693eSPhilip Paeps 	/*
500e948693eSPhilip Paeps 	 * If the mapping required more than one descriptor
501e948693eSPhilip Paeps 	 * then we need to associate the DMA map with the last
502e948693eSPhilip Paeps 	 * descriptor, not the first.
503e948693eSPhilip Paeps 	 */
504e948693eSPhilip Paeps 	if (used_map != &stmp->map) {
505e948693eSPhilip Paeps 		map = stmp->map;
506e948693eSPhilip Paeps 		stmp->map = *used_map;
507e948693eSPhilip Paeps 		*used_map = map;
508e948693eSPhilip Paeps 	}
509e948693eSPhilip Paeps 
510e948693eSPhilip Paeps 	stmp->u.mbuf = mbuf;
511e948693eSPhilip Paeps 	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;
512e948693eSPhilip Paeps 
513e948693eSPhilip Paeps 	/* Post the fragment list. */
514e948693eSPhilip Paeps 	sfxge_tx_qlist_post(txq);
515e948693eSPhilip Paeps 
516b7b0edd1SGeorge V. Neville-Neil 	return (0);
517e948693eSPhilip Paeps 
518e948693eSPhilip Paeps reject_mapped:
519ad72d030SAndrew Rybchenko 	txq->hw_vlan_tci = hw_vlan_tci_prev;
5208b447157SAndrew Rybchenko 	txq->hw_cksum_flags = hw_cksum_flags_prev;
521e948693eSPhilip Paeps 	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
522e948693eSPhilip Paeps reject:
523e948693eSPhilip Paeps 	/* Drop the packet on the floor. */
524e948693eSPhilip Paeps 	m_freem(mbuf);
525e948693eSPhilip Paeps 	++txq->drops;
526e948693eSPhilip Paeps 
527b7b0edd1SGeorge V. Neville-Neil 	return (rc);
528e948693eSPhilip Paeps }
529e948693eSPhilip Paeps 
530e948693eSPhilip Paeps /*
531e948693eSPhilip Paeps  * Drain the deferred packet list into the transmit queue.
532e948693eSPhilip Paeps  */
533e948693eSPhilip Paeps static void
sfxge_tx_qdpl_drain(struct sfxge_txq * txq)534e948693eSPhilip Paeps sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
535e948693eSPhilip Paeps {
536e948693eSPhilip Paeps 	struct sfxge_softc *sc;
537e948693eSPhilip Paeps 	struct sfxge_tx_dpl *stdp;
538e948693eSPhilip Paeps 	struct mbuf *mbuf, *next;
539e948693eSPhilip Paeps 	unsigned int count;
54093929f25SAndrew Rybchenko 	unsigned int non_tcp_count;
541e948693eSPhilip Paeps 	unsigned int pushed;
542e948693eSPhilip Paeps 	int rc;
543e948693eSPhilip Paeps 
544763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
545e948693eSPhilip Paeps 
546e948693eSPhilip Paeps 	sc = txq->sc;
547e948693eSPhilip Paeps 	stdp = &txq->dpl;
548e948693eSPhilip Paeps 	pushed = txq->added;
549e948693eSPhilip Paeps 
55006824d2cSAndrew Rybchenko 	if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) {
551e948693eSPhilip Paeps 		prefetch_read_many(sc->enp);
552e948693eSPhilip Paeps 		prefetch_read_many(txq->common);
55306824d2cSAndrew Rybchenko 	}
554e948693eSPhilip Paeps 
555e948693eSPhilip Paeps 	mbuf = stdp->std_get;
556bc85c897SGeorge V. Neville-Neil 	count = stdp->std_get_count;
55793929f25SAndrew Rybchenko 	non_tcp_count = stdp->std_get_non_tcp_count;
55893929f25SAndrew Rybchenko 
55993929f25SAndrew Rybchenko 	if (count > stdp->std_get_hiwat)
56093929f25SAndrew Rybchenko 		stdp->std_get_hiwat = count;
561e948693eSPhilip Paeps 
562e948693eSPhilip Paeps 	while (count != 0) {
563e948693eSPhilip Paeps 		KASSERT(mbuf != NULL, ("mbuf == NULL"));
564e948693eSPhilip Paeps 
565e948693eSPhilip Paeps 		next = mbuf->m_nextpkt;
566e948693eSPhilip Paeps 		mbuf->m_nextpkt = NULL;
567e948693eSPhilip Paeps 
568e948693eSPhilip Paeps 		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */
569e948693eSPhilip Paeps 
570e948693eSPhilip Paeps 		if (next != NULL)
571e948693eSPhilip Paeps 			prefetch_read_many(next);
572e948693eSPhilip Paeps 
573e948693eSPhilip Paeps 		rc = sfxge_tx_queue_mbuf(txq, mbuf);
574e948693eSPhilip Paeps 		--count;
57593929f25SAndrew Rybchenko 		non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf);
576e948693eSPhilip Paeps 		mbuf = next;
577e948693eSPhilip Paeps 		if (rc != 0)
578e948693eSPhilip Paeps 			continue;
579e948693eSPhilip Paeps 
580e948693eSPhilip Paeps 		if (txq->blocked)
581e948693eSPhilip Paeps 			break;
582e948693eSPhilip Paeps 
583e948693eSPhilip Paeps 		/* Push the fragments to the hardware in batches. */
584e948693eSPhilip Paeps 		if (txq->added - pushed >= SFXGE_TX_BATCH) {
5853c838a9fSAndrew Rybchenko 			efx_tx_qpush(txq->common, txq->added, pushed);
586e948693eSPhilip Paeps 			pushed = txq->added;
587e948693eSPhilip Paeps 		}
588e948693eSPhilip Paeps 	}
589e948693eSPhilip Paeps 
590e948693eSPhilip Paeps 	if (count == 0) {
591e948693eSPhilip Paeps 		KASSERT(mbuf == NULL, ("mbuf != NULL"));
59293929f25SAndrew Rybchenko 		KASSERT(non_tcp_count == 0,
59393929f25SAndrew Rybchenko 			("inconsistent TCP/non-TCP detection"));
594e948693eSPhilip Paeps 		stdp->std_get = NULL;
595bc85c897SGeorge V. Neville-Neil 		stdp->std_get_count = 0;
59693929f25SAndrew Rybchenko 		stdp->std_get_non_tcp_count = 0;
597e948693eSPhilip Paeps 		stdp->std_getp = &stdp->std_get;
598e948693eSPhilip Paeps 	} else {
599e948693eSPhilip Paeps 		stdp->std_get = mbuf;
600bc85c897SGeorge V. Neville-Neil 		stdp->std_get_count = count;
60193929f25SAndrew Rybchenko 		stdp->std_get_non_tcp_count = non_tcp_count;
602e948693eSPhilip Paeps 	}
603e948693eSPhilip Paeps 
604e948693eSPhilip Paeps 	if (txq->added != pushed)
6053c838a9fSAndrew Rybchenko 		efx_tx_qpush(txq->common, txq->added, pushed);
606e948693eSPhilip Paeps 
607bc85c897SGeorge V. Neville-Neil 	KASSERT(txq->blocked || stdp->std_get_count == 0,
608e948693eSPhilip Paeps 		("queue unblocked but count is non-zero"));
609e948693eSPhilip Paeps }
610e948693eSPhilip Paeps 
6113c838a9fSAndrew Rybchenko #define	SFXGE_TX_QDPL_PENDING(_txq)	((_txq)->dpl.std_put != 0)
612e948693eSPhilip Paeps 
613e948693eSPhilip Paeps /*
614e948693eSPhilip Paeps  * Service the deferred packet list.
615e948693eSPhilip Paeps  *
616e948693eSPhilip Paeps  * NOTE: drops the txq mutex!
617e948693eSPhilip Paeps  */
6180b28bbdcSAndrew Rybchenko static void
sfxge_tx_qdpl_service(struct sfxge_txq * txq)619e948693eSPhilip Paeps sfxge_tx_qdpl_service(struct sfxge_txq *txq)
620e948693eSPhilip Paeps {
621763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
622e948693eSPhilip Paeps 
623e948693eSPhilip Paeps 	do {
624e948693eSPhilip Paeps 		if (SFXGE_TX_QDPL_PENDING(txq))
625e948693eSPhilip Paeps 			sfxge_tx_qdpl_swizzle(txq);
626e948693eSPhilip Paeps 
627e948693eSPhilip Paeps 		if (!txq->blocked)
628e948693eSPhilip Paeps 			sfxge_tx_qdpl_drain(txq);
629e948693eSPhilip Paeps 
630763cab71SAndrew Rybchenko 		SFXGE_TXQ_UNLOCK(txq);
631e948693eSPhilip Paeps 	} while (SFXGE_TX_QDPL_PENDING(txq) &&
632763cab71SAndrew Rybchenko 		 SFXGE_TXQ_TRYLOCK(txq));
633e948693eSPhilip Paeps }
634e948693eSPhilip Paeps 
635e948693eSPhilip Paeps /*
636d6e9f736SAndrew Rybchenko  * Put a packet on the deferred packet get-list.
637e948693eSPhilip Paeps  */
6380b28bbdcSAndrew Rybchenko static int
sfxge_tx_qdpl_put_locked(struct sfxge_txq * txq,struct mbuf * mbuf)639d6e9f736SAndrew Rybchenko sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf)
640e948693eSPhilip Paeps {
641e948693eSPhilip Paeps 	struct sfxge_tx_dpl *stdp;
642e948693eSPhilip Paeps 
643e948693eSPhilip Paeps 	stdp = &txq->dpl;
644e948693eSPhilip Paeps 
645e948693eSPhilip Paeps 	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));
646e948693eSPhilip Paeps 
647763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
648e948693eSPhilip Paeps 
64993929f25SAndrew Rybchenko 	if (stdp->std_get_count >= stdp->std_get_max) {
65093929f25SAndrew Rybchenko 		txq->get_overflow++;
651c1974e29SGleb Smirnoff 		return (ENOBUFS);
65293929f25SAndrew Rybchenko 	}
65393929f25SAndrew Rybchenko 	if (sfxge_is_mbuf_non_tcp(mbuf)) {
65493929f25SAndrew Rybchenko 		if (stdp->std_get_non_tcp_count >=
65593929f25SAndrew Rybchenko 		    stdp->std_get_non_tcp_max) {
65693929f25SAndrew Rybchenko 			txq->get_non_tcp_overflow++;
65793929f25SAndrew Rybchenko 			return (ENOBUFS);
65893929f25SAndrew Rybchenko 		}
65993929f25SAndrew Rybchenko 		stdp->std_get_non_tcp_count++;
66093929f25SAndrew Rybchenko 	}
661c1974e29SGleb Smirnoff 
662e948693eSPhilip Paeps 	*(stdp->std_getp) = mbuf;
663e948693eSPhilip Paeps 	stdp->std_getp = &mbuf->m_nextpkt;
664bc85c897SGeorge V. Neville-Neil 	stdp->std_get_count++;
665d6e9f736SAndrew Rybchenko 
666d6e9f736SAndrew Rybchenko 	return (0);
667d6e9f736SAndrew Rybchenko }
668d6e9f736SAndrew Rybchenko 
669d6e9f736SAndrew Rybchenko /*
670d6e9f736SAndrew Rybchenko  * Put a packet on the deferred packet put-list.
671d6e9f736SAndrew Rybchenko  *
672d6e9f736SAndrew Rybchenko  * We overload the csum_data field in the mbuf to keep track of this length
673d6e9f736SAndrew Rybchenko  * because there is no cheap alternative to avoid races.
674d6e9f736SAndrew Rybchenko  */
675d6e9f736SAndrew Rybchenko static int
sfxge_tx_qdpl_put_unlocked(struct sfxge_txq * txq,struct mbuf * mbuf)676d6e9f736SAndrew Rybchenko sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf)
677d6e9f736SAndrew Rybchenko {
678d6e9f736SAndrew Rybchenko 	struct sfxge_tx_dpl *stdp;
679e948693eSPhilip Paeps 	volatile uintptr_t *putp;
680e948693eSPhilip Paeps 	uintptr_t old;
681e948693eSPhilip Paeps 	uintptr_t new;
6821e2b4cefSAndrew Rybchenko 	unsigned int put_count;
683e948693eSPhilip Paeps 
684d6e9f736SAndrew Rybchenko 	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));
685d6e9f736SAndrew Rybchenko 
686d6e9f736SAndrew Rybchenko 	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);
687d6e9f736SAndrew Rybchenko 
688d6e9f736SAndrew Rybchenko 	stdp = &txq->dpl;
689e948693eSPhilip Paeps 	putp = &stdp->std_put;
690e948693eSPhilip Paeps 	new = (uintptr_t)mbuf;
691e948693eSPhilip Paeps 
692e948693eSPhilip Paeps 	do {
693e948693eSPhilip Paeps 		old = *putp;
694b7b0edd1SGeorge V. Neville-Neil 		if (old != 0) {
695e948693eSPhilip Paeps 			struct mbuf *mp = (struct mbuf *)old;
6961e2b4cefSAndrew Rybchenko 			put_count = mp->m_pkthdr.csum_data;
697e948693eSPhilip Paeps 		} else
6981e2b4cefSAndrew Rybchenko 			put_count = 0;
6991e2b4cefSAndrew Rybchenko 		if (put_count >= stdp->std_put_max) {
70093929f25SAndrew Rybchenko 			atomic_add_long(&txq->put_overflow, 1);
701c1974e29SGleb Smirnoff 			return (ENOBUFS);
70293929f25SAndrew Rybchenko 		}
7031e2b4cefSAndrew Rybchenko 		mbuf->m_pkthdr.csum_data = put_count + 1;
704e948693eSPhilip Paeps 		mbuf->m_nextpkt = (void *)old;
705fb8ccc78SMarius Strobl 	} while (atomic_cmpset_ptr(putp, old, new) == 0);
706e948693eSPhilip Paeps 
707e948693eSPhilip Paeps 	return (0);
708e948693eSPhilip Paeps }
709e948693eSPhilip Paeps 
710e948693eSPhilip Paeps /*
711e948693eSPhilip Paeps  * Called from if_transmit - will try to grab the txq lock and enqueue to the
712c071447aSAndrew Rybchenko  * put list if it succeeds, otherwise try to push onto the defer list if space.
713e948693eSPhilip Paeps  */
7143c838a9fSAndrew Rybchenko static int
sfxge_tx_packet_add(struct sfxge_txq * txq,struct mbuf * m)715e948693eSPhilip Paeps sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
716e948693eSPhilip Paeps {
717e948693eSPhilip Paeps 	int rc;
718e948693eSPhilip Paeps 
719d7ac87d3SGleb Smirnoff 	if (!SFXGE_LINK_UP(txq->sc)) {
72093929f25SAndrew Rybchenko 		atomic_add_long(&txq->netdown_drops, 1);
7210e4ebe6cSAndrew Rybchenko 		return (ENETDOWN);
722d7ac87d3SGleb Smirnoff 	}
723d7ac87d3SGleb Smirnoff 
724e948693eSPhilip Paeps 	/*
725e948693eSPhilip Paeps 	 * Try to grab the txq lock.  If we are able to get the lock,
726e948693eSPhilip Paeps 	 * the packet will be appended to the "get list" of the deferred
727e948693eSPhilip Paeps 	 * packet list.  Otherwise, it will be pushed on the "put list".
728e948693eSPhilip Paeps 	 */
729deee1de4SAndrew Rybchenko 	if (SFXGE_TXQ_TRYLOCK(txq)) {
730d6e9f736SAndrew Rybchenko 		/* First swizzle put-list to get-list to keep order */
731d6e9f736SAndrew Rybchenko 		sfxge_tx_qdpl_swizzle(txq);
732d6e9f736SAndrew Rybchenko 
733d6e9f736SAndrew Rybchenko 		rc = sfxge_tx_qdpl_put_locked(txq, m);
734deee1de4SAndrew Rybchenko 
735deee1de4SAndrew Rybchenko 		/* Try to service the list. */
736deee1de4SAndrew Rybchenko 		sfxge_tx_qdpl_service(txq);
737deee1de4SAndrew Rybchenko 		/* Lock has been dropped. */
738d6e9f736SAndrew Rybchenko 	} else {
739d6e9f736SAndrew Rybchenko 		rc = sfxge_tx_qdpl_put_unlocked(txq, m);
740e948693eSPhilip Paeps 
741e948693eSPhilip Paeps 		/*
742e948693eSPhilip Paeps 		 * Try to grab the lock again.
743e948693eSPhilip Paeps 		 *
744d6e9f736SAndrew Rybchenko 		 * If we are able to get the lock, we need to process
745d6e9f736SAndrew Rybchenko 		 * the deferred packet list.  If we are not able to get
746d6e9f736SAndrew Rybchenko 		 * the lock, another thread is processing the list.
747e948693eSPhilip Paeps 		 */
748f080384cSAndrew Rybchenko 		if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) {
749e948693eSPhilip Paeps 			sfxge_tx_qdpl_service(txq);
750e948693eSPhilip Paeps 			/* Lock has been dropped. */
751e948693eSPhilip Paeps 		}
752deee1de4SAndrew Rybchenko 	}
753deee1de4SAndrew Rybchenko 
754deee1de4SAndrew Rybchenko 	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);
755e948693eSPhilip Paeps 
756f080384cSAndrew Rybchenko 	return (rc);
757e948693eSPhilip Paeps }
758e948693eSPhilip Paeps 
759e948693eSPhilip Paeps static void
sfxge_tx_qdpl_flush(struct sfxge_txq * txq)760e948693eSPhilip Paeps sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
761e948693eSPhilip Paeps {
762e948693eSPhilip Paeps 	struct sfxge_tx_dpl *stdp = &txq->dpl;
763e948693eSPhilip Paeps 	struct mbuf *mbuf, *next;
764e948693eSPhilip Paeps 
765763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK(txq);
766e948693eSPhilip Paeps 
767e948693eSPhilip Paeps 	sfxge_tx_qdpl_swizzle(txq);
768e948693eSPhilip Paeps 	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
769e948693eSPhilip Paeps 		next = mbuf->m_nextpkt;
770e948693eSPhilip Paeps 		m_freem(mbuf);
771e948693eSPhilip Paeps 	}
772e948693eSPhilip Paeps 	stdp->std_get = NULL;
773bc85c897SGeorge V. Neville-Neil 	stdp->std_get_count = 0;
77493929f25SAndrew Rybchenko 	stdp->std_get_non_tcp_count = 0;
775e948693eSPhilip Paeps 	stdp->std_getp = &stdp->std_get;
776e948693eSPhilip Paeps 
777763cab71SAndrew Rybchenko 	SFXGE_TXQ_UNLOCK(txq);
778e948693eSPhilip Paeps }
779e948693eSPhilip Paeps 
780e948693eSPhilip Paeps void
sfxge_if_qflush(if_t ifp)78104abf87bSJustin Hibbits sfxge_if_qflush(if_t ifp)
782e948693eSPhilip Paeps {
783e948693eSPhilip Paeps 	struct sfxge_softc *sc;
784cb552e88SAndrew Rybchenko 	unsigned int i;
785e948693eSPhilip Paeps 
78604abf87bSJustin Hibbits 	sc = if_getsoftc(ifp);
787e948693eSPhilip Paeps 
788e2b05fe2SAndrew Rybchenko 	for (i = 0; i < sc->txq_count; i++)
789e948693eSPhilip Paeps 		sfxge_tx_qdpl_flush(sc->txq[i]);
790e948693eSPhilip Paeps }
791e948693eSPhilip Paeps 
792a32efb97SAndrew Rybchenko #if SFXGE_TX_PARSE_EARLY
793a32efb97SAndrew Rybchenko 
794a32efb97SAndrew Rybchenko /* There is little space for user data in mbuf pkthdr, so we
795a32efb97SAndrew Rybchenko  * use l*hlen fields which are not used by the driver otherwise
796a32efb97SAndrew Rybchenko  * to store header offsets.
797a32efb97SAndrew Rybchenko  * The fields are 8-bit, but it's ok, no header may be longer than 255 bytes.
798a32efb97SAndrew Rybchenko  */
799a32efb97SAndrew Rybchenko 
800a32efb97SAndrew Rybchenko #define TSO_MBUF_PROTO(_mbuf)    ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
801a32efb97SAndrew Rybchenko /* We abuse l5hlen here because PH_loc can hold only 64 bits of data */
802a32efb97SAndrew Rybchenko #define TSO_MBUF_FLAGS(_mbuf)    ((_mbuf)->m_pkthdr.l5hlen)
803a32efb97SAndrew Rybchenko #define TSO_MBUF_PACKETID(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
804a32efb97SAndrew Rybchenko #define TSO_MBUF_SEQNUM(_mbuf)   ((_mbuf)->m_pkthdr.PH_loc.thirtytwo[1])
805a32efb97SAndrew Rybchenko 
sfxge_parse_tx_packet(struct mbuf * mbuf)806a32efb97SAndrew Rybchenko static void sfxge_parse_tx_packet(struct mbuf *mbuf)
807a32efb97SAndrew Rybchenko {
808a32efb97SAndrew Rybchenko 	struct ether_header *eh = mtod(mbuf, struct ether_header *);
809a32efb97SAndrew Rybchenko 	const struct tcphdr *th;
810a32efb97SAndrew Rybchenko 	struct tcphdr th_copy;
811a32efb97SAndrew Rybchenko 
812a32efb97SAndrew Rybchenko 	/* Find network protocol and header */
813a32efb97SAndrew Rybchenko 	TSO_MBUF_PROTO(mbuf) = eh->ether_type;
814a32efb97SAndrew Rybchenko 	if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_VLAN)) {
815a32efb97SAndrew Rybchenko 		struct ether_vlan_header *veh =
816a32efb97SAndrew Rybchenko 			mtod(mbuf, struct ether_vlan_header *);
817a32efb97SAndrew Rybchenko 		TSO_MBUF_PROTO(mbuf) = veh->evl_proto;
818a32efb97SAndrew Rybchenko 		mbuf->m_pkthdr.l2hlen = sizeof(*veh);
819a32efb97SAndrew Rybchenko 	} else {
820a32efb97SAndrew Rybchenko 		mbuf->m_pkthdr.l2hlen = sizeof(*eh);
821a32efb97SAndrew Rybchenko 	}
822a32efb97SAndrew Rybchenko 
823a32efb97SAndrew Rybchenko 	/* Find TCP header */
824a32efb97SAndrew Rybchenko 	if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IP)) {
825a32efb97SAndrew Rybchenko 		const struct ip *iph = (const struct ip *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen);
826a32efb97SAndrew Rybchenko 
827a32efb97SAndrew Rybchenko 		KASSERT(iph->ip_p == IPPROTO_TCP,
828a32efb97SAndrew Rybchenko 			("TSO required on non-TCP packet"));
829a32efb97SAndrew Rybchenko 		mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + 4 * iph->ip_hl;
830a32efb97SAndrew Rybchenko 		TSO_MBUF_PACKETID(mbuf) = iph->ip_id;
831a32efb97SAndrew Rybchenko 	} else {
832a32efb97SAndrew Rybchenko 		KASSERT(TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IPV6),
833a32efb97SAndrew Rybchenko 			("TSO required on non-IP packet"));
834a32efb97SAndrew Rybchenko 		KASSERT(((const struct ip6_hdr *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen))->ip6_nxt ==
835a32efb97SAndrew Rybchenko 			IPPROTO_TCP,
836a32efb97SAndrew Rybchenko 			("TSO required on non-TCP packet"));
837a32efb97SAndrew Rybchenko 		mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + sizeof(struct ip6_hdr);
838a32efb97SAndrew Rybchenko 		TSO_MBUF_PACKETID(mbuf) = 0;
839a32efb97SAndrew Rybchenko 	}
840a32efb97SAndrew Rybchenko 
841a32efb97SAndrew Rybchenko 	KASSERT(mbuf->m_len >= mbuf->m_pkthdr.l3hlen,
842a32efb97SAndrew Rybchenko 		("network header is fragmented in mbuf"));
843a32efb97SAndrew Rybchenko 
844a32efb97SAndrew Rybchenko 	/* We need TCP header including flags (window is the next) */
845a32efb97SAndrew Rybchenko 	if (mbuf->m_len < mbuf->m_pkthdr.l3hlen + offsetof(struct tcphdr, th_win)) {
846a32efb97SAndrew Rybchenko 		m_copydata(mbuf, mbuf->m_pkthdr.l3hlen, sizeof(th_copy),
847a32efb97SAndrew Rybchenko 			   (caddr_t)&th_copy);
848a32efb97SAndrew Rybchenko 		th = &th_copy;
849a32efb97SAndrew Rybchenko 	} else {
850a32efb97SAndrew Rybchenko 		th = (const struct tcphdr *)mtodo(mbuf, mbuf->m_pkthdr.l3hlen);
851a32efb97SAndrew Rybchenko 	}
852a32efb97SAndrew Rybchenko 
853a32efb97SAndrew Rybchenko 	mbuf->m_pkthdr.l4hlen = mbuf->m_pkthdr.l3hlen + 4 * th->th_off;
854a32efb97SAndrew Rybchenko 	TSO_MBUF_SEQNUM(mbuf) = ntohl(th->th_seq);
855a32efb97SAndrew Rybchenko 
856a32efb97SAndrew Rybchenko 	/* These flags must not be duplicated */
857a32efb97SAndrew Rybchenko 	/*
858a32efb97SAndrew Rybchenko 	 * RST should not be duplicated as well, but FreeBSD kernel
859a32efb97SAndrew Rybchenko 	 * generates TSO packets with RST flag. So, do not assert
860a32efb97SAndrew Rybchenko 	 * its absence.
861a32efb97SAndrew Rybchenko 	 */
862a32efb97SAndrew Rybchenko 	KASSERT(!(th->th_flags & (TH_URG | TH_SYN)),
863a32efb97SAndrew Rybchenko 		("incompatible TCP flag 0x%x on TSO packet",
864a32efb97SAndrew Rybchenko 		 th->th_flags & (TH_URG | TH_SYN)));
865a32efb97SAndrew Rybchenko 	TSO_MBUF_FLAGS(mbuf) = th->th_flags;
866a32efb97SAndrew Rybchenko }
867a32efb97SAndrew Rybchenko #endif
868a32efb97SAndrew Rybchenko 
869e948693eSPhilip Paeps /*
870e948693eSPhilip Paeps  * TX start -- called by the stack.
871e948693eSPhilip Paeps  */
872e948693eSPhilip Paeps int
sfxge_if_transmit(if_t ifp,struct mbuf * m)87304abf87bSJustin Hibbits sfxge_if_transmit(if_t ifp, struct mbuf *m)
874e948693eSPhilip Paeps {
875e948693eSPhilip Paeps 	struct sfxge_softc *sc;
876e948693eSPhilip Paeps 	struct sfxge_txq *txq;
877e948693eSPhilip Paeps 	int rc;
878e948693eSPhilip Paeps 
87904abf87bSJustin Hibbits 	sc = (struct sfxge_softc *)if_getsoftc(ifp);
880e948693eSPhilip Paeps 
88187581ab8SAndrew Rybchenko 	/*
88287581ab8SAndrew Rybchenko 	 * Transmit may be called when interface is up from the kernel
88387581ab8SAndrew Rybchenko 	 * point of view, but not yet up (in progress) from the driver
88487581ab8SAndrew Rybchenko 	 * point of view. I.e. link aggregation bring up.
88587581ab8SAndrew Rybchenko 	 * Transmit may be called when interface is up from the driver
88687581ab8SAndrew Rybchenko 	 * point of view, but already down from the kernel point of
88787581ab8SAndrew Rybchenko 	 * view. I.e. Rx when interface shutdown is in progress.
88887581ab8SAndrew Rybchenko 	 */
88904abf87bSJustin Hibbits 	KASSERT((if_getflags(ifp) & IFF_UP) || (sc->if_flags & IFF_UP),
89087581ab8SAndrew Rybchenko 		("interface not up"));
891e948693eSPhilip Paeps 
892e948693eSPhilip Paeps 	/* Pick the desired transmit queue. */
8938b447157SAndrew Rybchenko 	if (sc->txq_dynamic_cksum_toggle_supported |
8948b447157SAndrew Rybchenko 	    (m->m_pkthdr.csum_flags &
8958b447157SAndrew Rybchenko 	     (CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO))) {
896e948693eSPhilip Paeps 		int index = 0;
897e948693eSPhilip Paeps 
8983bbc1e08SAndrew Rybchenko #ifdef RSS
8993bbc1e08SAndrew Rybchenko 		uint32_t bucket_id;
9003bbc1e08SAndrew Rybchenko 
9013bbc1e08SAndrew Rybchenko 		/*
9023bbc1e08SAndrew Rybchenko 		 * Select a TX queue which matches the corresponding
9033bbc1e08SAndrew Rybchenko 		 * RX queue for the hash in order to assign both
9043bbc1e08SAndrew Rybchenko 		 * TX and RX parts of the flow to the same CPU
9053bbc1e08SAndrew Rybchenko 		 */
9063bbc1e08SAndrew Rybchenko 		if (rss_m2bucket(m, &bucket_id) == 0)
9073bbc1e08SAndrew Rybchenko 			index = bucket_id % (sc->txq_count - (SFXGE_TXQ_NTYPES - 1));
9083bbc1e08SAndrew Rybchenko #else
909c2529042SHans Petter Selasky 		/* check if flowid is set */
910c2529042SHans Petter Selasky 		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
911e948693eSPhilip Paeps 			uint32_t hash = m->m_pkthdr.flowid;
91244fcad03SAndrew Rybchenko 			uint32_t idx = hash % nitems(sc->rx_indir_table);
913e948693eSPhilip Paeps 
91444fcad03SAndrew Rybchenko 			index = sc->rx_indir_table[idx];
915e948693eSPhilip Paeps 		}
9163bbc1e08SAndrew Rybchenko #endif
917a32efb97SAndrew Rybchenko #if SFXGE_TX_PARSE_EARLY
918a32efb97SAndrew Rybchenko 		if (m->m_pkthdr.csum_flags & CSUM_TSO)
919a32efb97SAndrew Rybchenko 			sfxge_parse_tx_packet(m);
920a32efb97SAndrew Rybchenko #endif
9218b447157SAndrew Rybchenko 		index += (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) ?
9228b447157SAndrew Rybchenko 			 SFXGE_TXQ_IP_TCP_UDP_CKSUM : 0;
9238b447157SAndrew Rybchenko 		txq = sc->txq[index];
924e948693eSPhilip Paeps 	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
925e948693eSPhilip Paeps 		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
926e948693eSPhilip Paeps 	} else {
927e948693eSPhilip Paeps 		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
928e948693eSPhilip Paeps 	}
929e948693eSPhilip Paeps 
930e948693eSPhilip Paeps 	rc = sfxge_tx_packet_add(txq, m);
9310e4ebe6cSAndrew Rybchenko 	if (rc != 0)
9320e4ebe6cSAndrew Rybchenko 		m_freem(m);
933e948693eSPhilip Paeps 
934e948693eSPhilip Paeps 	return (rc);
935e948693eSPhilip Paeps }
936e948693eSPhilip Paeps 
937e948693eSPhilip Paeps /*
938e948693eSPhilip Paeps  * Software "TSO".  Not quite as good as doing it in hardware, but
939e948693eSPhilip Paeps  * still faster than segmenting in the stack.
940e948693eSPhilip Paeps  */
941e948693eSPhilip Paeps 
942e948693eSPhilip Paeps struct sfxge_tso_state {
943e948693eSPhilip Paeps 	/* Output position */
944e948693eSPhilip Paeps 	unsigned out_len;	/* Remaining length in current segment */
945e948693eSPhilip Paeps 	unsigned seqnum;	/* Current sequence number */
946e948693eSPhilip Paeps 	unsigned packet_space;	/* Remaining space in current packet */
947a45a0da1SAndrew Rybchenko 	unsigned segs_space;	/* Remaining number of DMA segments
948a45a0da1SAndrew Rybchenko 				   for the packet (FATSOv2 only) */
949e948693eSPhilip Paeps 
950e948693eSPhilip Paeps 	/* Input position */
951e948693eSPhilip Paeps 	uint64_t dma_addr;	/* DMA address of current position */
952e948693eSPhilip Paeps 	unsigned in_len;	/* Remaining length in current mbuf */
953e948693eSPhilip Paeps 
954e948693eSPhilip Paeps 	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
955e948693eSPhilip Paeps 	u_short protocol;	/* Network protocol (after VLAN decap) */
956e948693eSPhilip Paeps 	ssize_t nh_off;		/* Offset of network header */
957e948693eSPhilip Paeps 	ssize_t tcph_off;	/* Offset of TCP header */
958e948693eSPhilip Paeps 	unsigned header_len;	/* Number of bytes of header */
959d0f73877SAndrew Rybchenko 	unsigned seg_size;	/* TCP segment size */
9603c838a9fSAndrew Rybchenko 	int fw_assisted;	/* Use FW-assisted TSO */
9613c838a9fSAndrew Rybchenko 	u_short packet_id;	/* IPv4 packet ID from the original packet */
962a32efb97SAndrew Rybchenko 	uint8_t tcp_flags;	/* TCP flags */
9633c838a9fSAndrew Rybchenko 	efx_desc_t header_desc; /* Precomputed header descriptor for
9643c838a9fSAndrew Rybchenko 				 * FW-assisted TSO */
965e948693eSPhilip Paeps };
966e948693eSPhilip Paeps 
967a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY
tso_iph(const struct sfxge_tso_state * tso)9680b28bbdcSAndrew Rybchenko static const struct ip *tso_iph(const struct sfxge_tso_state *tso)
969e948693eSPhilip Paeps {
970e948693eSPhilip Paeps 	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
971e948693eSPhilip Paeps 		("tso_iph() in non-IPv4 state"));
972e948693eSPhilip Paeps 	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
973e948693eSPhilip Paeps }
974a32efb97SAndrew Rybchenko 
tso_ip6h(const struct sfxge_tso_state * tso)9750b28bbdcSAndrew Rybchenko static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
976e948693eSPhilip Paeps {
977e948693eSPhilip Paeps 	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
978e948693eSPhilip Paeps 		("tso_ip6h() in non-IPv6 state"));
979e948693eSPhilip Paeps 	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
980e948693eSPhilip Paeps }
981a32efb97SAndrew Rybchenko 
tso_tcph(const struct sfxge_tso_state * tso)9820b28bbdcSAndrew Rybchenko static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
983e948693eSPhilip Paeps {
984e948693eSPhilip Paeps 	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
985e948693eSPhilip Paeps }
986a32efb97SAndrew Rybchenko #endif
987a32efb97SAndrew Rybchenko 
988e948693eSPhilip Paeps /* Size of preallocated TSO header buffers.  Larger blocks must be
989e948693eSPhilip Paeps  * allocated from the heap.
990e948693eSPhilip Paeps  */
991e948693eSPhilip Paeps #define	TSOH_STD_SIZE	128
992e948693eSPhilip Paeps 
993e948693eSPhilip Paeps /* At most half the descriptors in the queue at any time will refer to
994e948693eSPhilip Paeps  * a TSO header buffer, since they must always be followed by a
995e948693eSPhilip Paeps  * payload descriptor referring to an mbuf.
996e948693eSPhilip Paeps  */
997385b1d8eSGeorge V. Neville-Neil #define	TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
998e948693eSPhilip Paeps #define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
999385b1d8eSGeorge V. Neville-Neil #define	TSOH_PAGE_COUNT(_txq_entries)	\
1000057b4402SPedro F. Giffuni 	howmany(TSOH_COUNT(_txq_entries), TSOH_PER_PAGE)
1001e948693eSPhilip Paeps 
tso_init(struct sfxge_txq * txq)1002e948693eSPhilip Paeps static int tso_init(struct sfxge_txq *txq)
1003e948693eSPhilip Paeps {
1004e948693eSPhilip Paeps 	struct sfxge_softc *sc = txq->sc;
1005385b1d8eSGeorge V. Neville-Neil 	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
1006e948693eSPhilip Paeps 	int i, rc;
1007e948693eSPhilip Paeps 
1008e948693eSPhilip Paeps 	/* Allocate TSO header buffers */
1009385b1d8eSGeorge V. Neville-Neil 	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
1010e948693eSPhilip Paeps 				  M_SFXGE, M_WAITOK);
1011e948693eSPhilip Paeps 
1012385b1d8eSGeorge V. Neville-Neil 	for (i = 0; i < tsoh_page_count; i++) {
1013e948693eSPhilip Paeps 		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
1014b7b0edd1SGeorge V. Neville-Neil 		if (rc != 0)
1015e948693eSPhilip Paeps 			goto fail;
1016e948693eSPhilip Paeps 	}
1017e948693eSPhilip Paeps 
1018b7b0edd1SGeorge V. Neville-Neil 	return (0);
1019e948693eSPhilip Paeps 
1020e948693eSPhilip Paeps fail:
1021e948693eSPhilip Paeps 	while (i-- > 0)
1022e948693eSPhilip Paeps 		sfxge_dma_free(&txq->tsoh_buffer[i]);
1023e948693eSPhilip Paeps 	free(txq->tsoh_buffer, M_SFXGE);
1024e948693eSPhilip Paeps 	txq->tsoh_buffer = NULL;
1025b7b0edd1SGeorge V. Neville-Neil 	return (rc);
1026e948693eSPhilip Paeps }
1027e948693eSPhilip Paeps 
tso_fini(struct sfxge_txq * txq)1028e948693eSPhilip Paeps static void tso_fini(struct sfxge_txq *txq)
1029e948693eSPhilip Paeps {
1030e948693eSPhilip Paeps 	int i;
1031e948693eSPhilip Paeps 
1032b7b0edd1SGeorge V. Neville-Neil 	if (txq->tsoh_buffer != NULL) {
1033385b1d8eSGeorge V. Neville-Neil 		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
1034e948693eSPhilip Paeps 			sfxge_dma_free(&txq->tsoh_buffer[i]);
1035e948693eSPhilip Paeps 		free(txq->tsoh_buffer, M_SFXGE);
1036e948693eSPhilip Paeps 	}
1037e948693eSPhilip Paeps }
1038e948693eSPhilip Paeps 
tso_start(struct sfxge_txq * txq,struct sfxge_tso_state * tso,const bus_dma_segment_t * hdr_dma_seg,struct mbuf * mbuf)10393c838a9fSAndrew Rybchenko static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso,
10403c838a9fSAndrew Rybchenko 		      const bus_dma_segment_t *hdr_dma_seg,
10413c838a9fSAndrew Rybchenko 		      struct mbuf *mbuf)
1042e948693eSPhilip Paeps {
10433c838a9fSAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp);
1044a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY
1045a32efb97SAndrew Rybchenko 	struct ether_header *eh = mtod(mbuf, struct ether_header *);
1046b2c43c38SAndrew Rybchenko 	const struct tcphdr *th;
1047b2c43c38SAndrew Rybchenko 	struct tcphdr th_copy;
1048a32efb97SAndrew Rybchenko #endif
1049e948693eSPhilip Paeps 
1050a45a0da1SAndrew Rybchenko 	tso->fw_assisted = txq->tso_fw_assisted;
1051e948693eSPhilip Paeps 	tso->mbuf = mbuf;
1052e948693eSPhilip Paeps 
1053e948693eSPhilip Paeps 	/* Find network protocol and header */
1054a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY
1055e948693eSPhilip Paeps 	tso->protocol = eh->ether_type;
1056e948693eSPhilip Paeps 	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
1057e948693eSPhilip Paeps 		struct ether_vlan_header *veh =
1058e948693eSPhilip Paeps 			mtod(mbuf, struct ether_vlan_header *);
1059e948693eSPhilip Paeps 		tso->protocol = veh->evl_proto;
1060e948693eSPhilip Paeps 		tso->nh_off = sizeof(*veh);
1061e948693eSPhilip Paeps 	} else {
1062e948693eSPhilip Paeps 		tso->nh_off = sizeof(*eh);
1063e948693eSPhilip Paeps 	}
1064a32efb97SAndrew Rybchenko #else
1065a32efb97SAndrew Rybchenko 	tso->protocol = TSO_MBUF_PROTO(mbuf);
1066a32efb97SAndrew Rybchenko 	tso->nh_off = mbuf->m_pkthdr.l2hlen;
1067a32efb97SAndrew Rybchenko 	tso->tcph_off = mbuf->m_pkthdr.l3hlen;
1068414dec5fSAndrew Rybchenko 	tso->packet_id = ntohs(TSO_MBUF_PACKETID(mbuf));
1069a32efb97SAndrew Rybchenko #endif
1070e948693eSPhilip Paeps 
1071a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY
1072e948693eSPhilip Paeps 	/* Find TCP header */
1073e948693eSPhilip Paeps 	if (tso->protocol == htons(ETHERTYPE_IP)) {
1074e948693eSPhilip Paeps 		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
1075e948693eSPhilip Paeps 			("TSO required on non-TCP packet"));
1076e948693eSPhilip Paeps 		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
1077414dec5fSAndrew Rybchenko 		tso->packet_id = ntohs(tso_iph(tso)->ip_id);
1078e948693eSPhilip Paeps 	} else {
1079e948693eSPhilip Paeps 		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
1080e948693eSPhilip Paeps 			("TSO required on non-IP packet"));
1081e948693eSPhilip Paeps 		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
1082e948693eSPhilip Paeps 			("TSO required on non-TCP packet"));
1083e948693eSPhilip Paeps 		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
10843c838a9fSAndrew Rybchenko 		tso->packet_id = 0;
10853c838a9fSAndrew Rybchenko 	}
1086a32efb97SAndrew Rybchenko #endif
1087a32efb97SAndrew Rybchenko 
10883c838a9fSAndrew Rybchenko 	if (tso->fw_assisted &&
10893c838a9fSAndrew Rybchenko 	    __predict_false(tso->tcph_off >
10903c838a9fSAndrew Rybchenko 			    encp->enc_tx_tso_tcp_header_offset_limit)) {
10913c838a9fSAndrew Rybchenko 		tso->fw_assisted = 0;
1092e948693eSPhilip Paeps 	}
1093e948693eSPhilip Paeps 
1094a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY
1095b2c43c38SAndrew Rybchenko 	KASSERT(mbuf->m_len >= tso->tcph_off,
1096b2c43c38SAndrew Rybchenko 		("network header is fragmented in mbuf"));
1097b2c43c38SAndrew Rybchenko 	/* We need TCP header including flags (window is the next) */
1098b2c43c38SAndrew Rybchenko 	if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) {
1099b2c43c38SAndrew Rybchenko 		m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy),
1100b2c43c38SAndrew Rybchenko 			   (caddr_t)&th_copy);
1101b2c43c38SAndrew Rybchenko 		th = &th_copy;
1102b2c43c38SAndrew Rybchenko 	} else {
1103b2c43c38SAndrew Rybchenko 		th = tso_tcph(tso);
1104b2c43c38SAndrew Rybchenko 	}
1105b2c43c38SAndrew Rybchenko 	tso->header_len = tso->tcph_off + 4 * th->th_off;
1106a32efb97SAndrew Rybchenko #else
1107a32efb97SAndrew Rybchenko 	tso->header_len = mbuf->m_pkthdr.l4hlen;
1108a32efb97SAndrew Rybchenko #endif
1109d0f73877SAndrew Rybchenko 	tso->seg_size = mbuf->m_pkthdr.tso_segsz;
1110e948693eSPhilip Paeps 
1111a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY
1112b2c43c38SAndrew Rybchenko 	tso->seqnum = ntohl(th->th_seq);
1113e948693eSPhilip Paeps 
1114e948693eSPhilip Paeps 	/* These flags must not be duplicated */
11151217b24eSAndrew Rybchenko 	/*
11161217b24eSAndrew Rybchenko 	 * RST should not be duplicated as well, but FreeBSD kernel
11171217b24eSAndrew Rybchenko 	 * generates TSO packets with RST flag. So, do not assert
11181217b24eSAndrew Rybchenko 	 * its absence.
11191217b24eSAndrew Rybchenko 	 */
11201217b24eSAndrew Rybchenko 	KASSERT(!(th->th_flags & (TH_URG | TH_SYN)),
11211217b24eSAndrew Rybchenko 		("incompatible TCP flag 0x%x on TSO packet",
11221217b24eSAndrew Rybchenko 		 th->th_flags & (TH_URG | TH_SYN)));
1123a32efb97SAndrew Rybchenko 	tso->tcp_flags = th->th_flags;
1124a32efb97SAndrew Rybchenko #else
1125a32efb97SAndrew Rybchenko 	tso->seqnum = TSO_MBUF_SEQNUM(mbuf);
1126a32efb97SAndrew Rybchenko 	tso->tcp_flags = TSO_MBUF_FLAGS(mbuf);
1127a32efb97SAndrew Rybchenko #endif
1128e948693eSPhilip Paeps 
1129e948693eSPhilip Paeps 	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
11303c838a9fSAndrew Rybchenko 
11313c838a9fSAndrew Rybchenko 	if (tso->fw_assisted) {
11323c838a9fSAndrew Rybchenko 		if (hdr_dma_seg->ds_len >= tso->header_len)
11333c838a9fSAndrew Rybchenko 			efx_tx_qdesc_dma_create(txq->common,
11343c838a9fSAndrew Rybchenko 						hdr_dma_seg->ds_addr,
11353c838a9fSAndrew Rybchenko 						tso->header_len,
11363c838a9fSAndrew Rybchenko 						B_FALSE,
11373c838a9fSAndrew Rybchenko 						&tso->header_desc);
11383c838a9fSAndrew Rybchenko 		else
11393c838a9fSAndrew Rybchenko 			tso->fw_assisted = 0;
11403c838a9fSAndrew Rybchenko 	}
1141e948693eSPhilip Paeps }
1142e948693eSPhilip Paeps 
1143e948693eSPhilip Paeps /*
1144e948693eSPhilip Paeps  * tso_fill_packet_with_fragment - form descriptors for the current fragment
1145e948693eSPhilip Paeps  *
1146e948693eSPhilip Paeps  * Form descriptors for the current fragment, until we reach the end
1147e948693eSPhilip Paeps  * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
1148e948693eSPhilip Paeps  * space.
1149e948693eSPhilip Paeps  */
tso_fill_packet_with_fragment(struct sfxge_txq * txq,struct sfxge_tso_state * tso)1150e948693eSPhilip Paeps static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
1151e948693eSPhilip Paeps 					  struct sfxge_tso_state *tso)
1152e948693eSPhilip Paeps {
11533c838a9fSAndrew Rybchenko 	efx_desc_t *desc;
1154e948693eSPhilip Paeps 	int n;
1155a45a0da1SAndrew Rybchenko 	uint64_t dma_addr = tso->dma_addr;
1156a45a0da1SAndrew Rybchenko 	boolean_t eop;
1157e948693eSPhilip Paeps 
1158e948693eSPhilip Paeps 	if (tso->in_len == 0 || tso->packet_space == 0)
1159e948693eSPhilip Paeps 		return;
1160e948693eSPhilip Paeps 
1161e948693eSPhilip Paeps 	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
1162e948693eSPhilip Paeps 	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));
1163e948693eSPhilip Paeps 
1164a45a0da1SAndrew Rybchenko 	if (tso->fw_assisted & SFXGE_FATSOV2) {
1165a45a0da1SAndrew Rybchenko 		n = tso->in_len;
1166a45a0da1SAndrew Rybchenko 		tso->out_len -= n;
1167a45a0da1SAndrew Rybchenko 		tso->seqnum += n;
1168a45a0da1SAndrew Rybchenko 		tso->in_len = 0;
1169a45a0da1SAndrew Rybchenko 		if (n < tso->packet_space) {
1170a45a0da1SAndrew Rybchenko 			tso->packet_space -= n;
1171a45a0da1SAndrew Rybchenko 			tso->segs_space--;
1172a45a0da1SAndrew Rybchenko 		} else {
1173a45a0da1SAndrew Rybchenko 			tso->packet_space = tso->seg_size -
1174a45a0da1SAndrew Rybchenko 			    (n - tso->packet_space) % tso->seg_size;
1175a45a0da1SAndrew Rybchenko 			tso->segs_space =
1176a45a0da1SAndrew Rybchenko 			    EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1 -
1177a45a0da1SAndrew Rybchenko 			    (tso->packet_space != tso->seg_size);
1178a45a0da1SAndrew Rybchenko 		}
1179a45a0da1SAndrew Rybchenko 	} else {
1180e948693eSPhilip Paeps 		n = min(tso->in_len, tso->packet_space);
1181e948693eSPhilip Paeps 		tso->packet_space -= n;
1182e948693eSPhilip Paeps 		tso->out_len -= n;
1183a45a0da1SAndrew Rybchenko 		tso->dma_addr += n;
1184e948693eSPhilip Paeps 		tso->in_len -= n;
1185a45a0da1SAndrew Rybchenko 	}
1186a45a0da1SAndrew Rybchenko 
1187a45a0da1SAndrew Rybchenko 	/*
1188a45a0da1SAndrew Rybchenko 	 * It is OK to use binary OR below to avoid extra branching
1189a45a0da1SAndrew Rybchenko 	 * since all conditions may always be checked.
1190a45a0da1SAndrew Rybchenko 	 */
1191a45a0da1SAndrew Rybchenko 	eop = (tso->out_len == 0) | (tso->packet_space == 0) |
1192a45a0da1SAndrew Rybchenko 	    (tso->segs_space == 0);
1193e948693eSPhilip Paeps 
1194e948693eSPhilip Paeps 	desc = &txq->pend_desc[txq->n_pend_desc++];
1195a45a0da1SAndrew Rybchenko 	efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc);
1196e948693eSPhilip Paeps }
1197e948693eSPhilip Paeps 
1198e948693eSPhilip Paeps /* Callback from bus_dmamap_load() for long TSO headers. */
tso_map_long_header(void * dma_addr_ret,bus_dma_segment_t * segs,int nseg,int error)1199e948693eSPhilip Paeps static void tso_map_long_header(void *dma_addr_ret,
1200e948693eSPhilip Paeps 				bus_dma_segment_t *segs, int nseg,
1201e948693eSPhilip Paeps 				int error)
1202e948693eSPhilip Paeps {
1203e948693eSPhilip Paeps 	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
1204e948693eSPhilip Paeps 				      __predict_true(nseg == 1)) ?
1205e948693eSPhilip Paeps 				     segs->ds_addr : 0);
1206e948693eSPhilip Paeps }
1207e948693eSPhilip Paeps 
1208e948693eSPhilip Paeps /*
1209e948693eSPhilip Paeps  * tso_start_new_packet - generate a new header and prepare for the new packet
1210e948693eSPhilip Paeps  *
1211e948693eSPhilip Paeps  * Generate a new header and prepare for the new packet.  Return 0 on
1212e948693eSPhilip Paeps  * success, or an error code if failed to alloc header.
1213e948693eSPhilip Paeps  */
tso_start_new_packet(struct sfxge_txq * txq,struct sfxge_tso_state * tso,unsigned int * idp)1214e948693eSPhilip Paeps static int tso_start_new_packet(struct sfxge_txq *txq,
1215e948693eSPhilip Paeps 				struct sfxge_tso_state *tso,
12163c838a9fSAndrew Rybchenko 				unsigned int *idp)
1217e948693eSPhilip Paeps {
12183c838a9fSAndrew Rybchenko 	unsigned int id = *idp;
1219e948693eSPhilip Paeps 	struct tcphdr *tsoh_th;
1220e948693eSPhilip Paeps 	unsigned ip_length;
1221e948693eSPhilip Paeps 	caddr_t header;
1222e948693eSPhilip Paeps 	uint64_t dma_addr;
1223e948693eSPhilip Paeps 	bus_dmamap_t map;
12243c838a9fSAndrew Rybchenko 	efx_desc_t *desc;
1225e948693eSPhilip Paeps 	int rc;
1226e948693eSPhilip Paeps 
12273c838a9fSAndrew Rybchenko 	if (tso->fw_assisted) {
1228a45a0da1SAndrew Rybchenko 		if (tso->fw_assisted & SFXGE_FATSOV2) {
1229a45a0da1SAndrew Rybchenko 			/* Add 2 FATSOv2 option descriptors */
1230a45a0da1SAndrew Rybchenko 			desc = &txq->pend_desc[txq->n_pend_desc];
1231a45a0da1SAndrew Rybchenko 			efx_tx_qdesc_tso2_create(txq->common,
1232a45a0da1SAndrew Rybchenko 						 tso->packet_id,
12334142e8cfSAndrew Rybchenko 						 0,
1234a45a0da1SAndrew Rybchenko 						 tso->seqnum,
1235a45a0da1SAndrew Rybchenko 						 tso->seg_size,
1236a45a0da1SAndrew Rybchenko 						 desc,
1237a45a0da1SAndrew Rybchenko 						 EFX_TX_FATSOV2_OPT_NDESCS);
1238a45a0da1SAndrew Rybchenko 			desc += EFX_TX_FATSOV2_OPT_NDESCS;
1239a45a0da1SAndrew Rybchenko 			txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS;
1240a45a0da1SAndrew Rybchenko 			KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
1241a45a0da1SAndrew Rybchenko 			id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask;
1242a45a0da1SAndrew Rybchenko 
1243a45a0da1SAndrew Rybchenko 			tso->segs_space =
1244a45a0da1SAndrew Rybchenko 			    EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1;
1245a45a0da1SAndrew Rybchenko 		} else {
1246a32efb97SAndrew Rybchenko 			uint8_t tcp_flags = tso->tcp_flags;
12473c838a9fSAndrew Rybchenko 
12483c838a9fSAndrew Rybchenko 			if (tso->out_len > tso->seg_size)
12493c838a9fSAndrew Rybchenko 				tcp_flags &= ~(TH_FIN | TH_PUSH);
12503c838a9fSAndrew Rybchenko 
1251a45a0da1SAndrew Rybchenko 			/* Add FATSOv1 option descriptor */
12523c838a9fSAndrew Rybchenko 			desc = &txq->pend_desc[txq->n_pend_desc++];
12533c838a9fSAndrew Rybchenko 			efx_tx_qdesc_tso_create(txq->common,
12543c838a9fSAndrew Rybchenko 						tso->packet_id,
12553c838a9fSAndrew Rybchenko 						tso->seqnum,
12563c838a9fSAndrew Rybchenko 						tcp_flags,
12573c838a9fSAndrew Rybchenko 						desc++);
12583c838a9fSAndrew Rybchenko 			KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
12593c838a9fSAndrew Rybchenko 			id = (id + 1) & txq->ptr_mask;
12603c838a9fSAndrew Rybchenko 
1261a45a0da1SAndrew Rybchenko 			tso->seqnum += tso->seg_size;
1262a45a0da1SAndrew Rybchenko 			tso->segs_space = UINT_MAX;
1263a45a0da1SAndrew Rybchenko 		}
1264a45a0da1SAndrew Rybchenko 
12653c838a9fSAndrew Rybchenko 		/* Header DMA descriptor */
12663c838a9fSAndrew Rybchenko 		*desc = tso->header_desc;
12673c838a9fSAndrew Rybchenko 		txq->n_pend_desc++;
12683c838a9fSAndrew Rybchenko 		KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
12693c838a9fSAndrew Rybchenko 		id = (id + 1) & txq->ptr_mask;
12703c838a9fSAndrew Rybchenko 	} else {
1271e948693eSPhilip Paeps 		/* Allocate a DMA-mapped header buffer. */
1272e948693eSPhilip Paeps 		if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
1273e948693eSPhilip Paeps 			unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
1274e948693eSPhilip Paeps 			unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;
1275e948693eSPhilip Paeps 
1276e948693eSPhilip Paeps 			header = (txq->tsoh_buffer[page_index].esm_base +
1277e948693eSPhilip Paeps 				  buf_index * TSOH_STD_SIZE);
1278e948693eSPhilip Paeps 			dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
1279e948693eSPhilip Paeps 				    buf_index * TSOH_STD_SIZE);
1280e948693eSPhilip Paeps 			map = txq->tsoh_buffer[page_index].esm_map;
1281e948693eSPhilip Paeps 
12823c838a9fSAndrew Rybchenko 			KASSERT(txq->stmp[id].flags == 0,
12833c838a9fSAndrew Rybchenko 				("stmp flags are not 0"));
1284e948693eSPhilip Paeps 		} else {
12853c838a9fSAndrew Rybchenko 			struct sfxge_tx_mapping *stmp = &txq->stmp[id];
12863c838a9fSAndrew Rybchenko 
1287e948693eSPhilip Paeps 			/* We cannot use bus_dmamem_alloc() as that may sleep */
1288e948693eSPhilip Paeps 			header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
1289e948693eSPhilip Paeps 			if (__predict_false(!header))
1290b7b0edd1SGeorge V. Neville-Neil 				return (ENOMEM);
1291e948693eSPhilip Paeps 			rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
1292e948693eSPhilip Paeps 					     header, tso->header_len,
1293e948693eSPhilip Paeps 					     tso_map_long_header, &dma_addr,
1294e948693eSPhilip Paeps 					     BUS_DMA_NOWAIT);
1295e948693eSPhilip Paeps 			if (__predict_false(dma_addr == 0)) {
1296e948693eSPhilip Paeps 				if (rc == 0) {
1297e948693eSPhilip Paeps 					/* Succeeded but got >1 segment */
1298e948693eSPhilip Paeps 					bus_dmamap_unload(txq->packet_dma_tag,
1299e948693eSPhilip Paeps 							  stmp->map);
1300e948693eSPhilip Paeps 					rc = EINVAL;
1301e948693eSPhilip Paeps 				}
1302e948693eSPhilip Paeps 				free(header, M_SFXGE);
1303b7b0edd1SGeorge V. Neville-Neil 				return (rc);
1304e948693eSPhilip Paeps 			}
1305e948693eSPhilip Paeps 			map = stmp->map;
1306e948693eSPhilip Paeps 
1307e948693eSPhilip Paeps 			txq->tso_long_headers++;
1308e948693eSPhilip Paeps 			stmp->u.heap_buf = header;
1309e948693eSPhilip Paeps 			stmp->flags = TX_BUF_UNMAP;
1310e948693eSPhilip Paeps 		}
1311e948693eSPhilip Paeps 
1312e948693eSPhilip Paeps 		tsoh_th = (struct tcphdr *)(header + tso->tcph_off);
1313e948693eSPhilip Paeps 
1314e948693eSPhilip Paeps 		/* Copy and update the headers. */
1315a35485aaSAndrew Rybchenko 		m_copydata(tso->mbuf, 0, tso->header_len, header);
1316e948693eSPhilip Paeps 
1317e948693eSPhilip Paeps 		tsoh_th->th_seq = htonl(tso->seqnum);
1318d0f73877SAndrew Rybchenko 		tso->seqnum += tso->seg_size;
1319d0f73877SAndrew Rybchenko 		if (tso->out_len > tso->seg_size) {
1320e948693eSPhilip Paeps 			/* This packet will not finish the TSO burst. */
1321d0f73877SAndrew Rybchenko 			ip_length = tso->header_len - tso->nh_off + tso->seg_size;
1322e948693eSPhilip Paeps 			tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
1323e948693eSPhilip Paeps 		} else {
1324e948693eSPhilip Paeps 			/* This packet will be the last in the TSO burst. */
1325e948693eSPhilip Paeps 			ip_length = tso->header_len - tso->nh_off + tso->out_len;
1326e948693eSPhilip Paeps 		}
1327e948693eSPhilip Paeps 
1328e948693eSPhilip Paeps 		if (tso->protocol == htons(ETHERTYPE_IP)) {
1329e948693eSPhilip Paeps 			struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
1330e948693eSPhilip Paeps 			tsoh_iph->ip_len = htons(ip_length);
1331e948693eSPhilip Paeps 			/* XXX We should increment ip_id, but FreeBSD doesn't
1332e948693eSPhilip Paeps 			 * currently allocate extra IDs for multiple segments.
1333e948693eSPhilip Paeps 			 */
1334e948693eSPhilip Paeps 		} else {
1335e948693eSPhilip Paeps 			struct ip6_hdr *tsoh_iph =
1336e948693eSPhilip Paeps 				(struct ip6_hdr *)(header + tso->nh_off);
1337e948693eSPhilip Paeps 			tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
1338e948693eSPhilip Paeps 		}
1339e948693eSPhilip Paeps 
1340e948693eSPhilip Paeps 		/* Make the header visible to the hardware. */
1341e948693eSPhilip Paeps 		bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);
1342e948693eSPhilip Paeps 
1343e948693eSPhilip Paeps 		/* Form a descriptor for this header. */
1344e948693eSPhilip Paeps 		desc = &txq->pend_desc[txq->n_pend_desc++];
13453c838a9fSAndrew Rybchenko 		efx_tx_qdesc_dma_create(txq->common,
13463c838a9fSAndrew Rybchenko 					dma_addr,
13473c838a9fSAndrew Rybchenko 					tso->header_len,
13483c838a9fSAndrew Rybchenko 					0,
13493c838a9fSAndrew Rybchenko 					desc);
13503c838a9fSAndrew Rybchenko 		id = (id + 1) & txq->ptr_mask;
1351a45a0da1SAndrew Rybchenko 
1352a45a0da1SAndrew Rybchenko 		tso->segs_space = UINT_MAX;
13533c838a9fSAndrew Rybchenko 	}
13543c838a9fSAndrew Rybchenko 	tso->packet_space = tso->seg_size;
13553c838a9fSAndrew Rybchenko 	txq->tso_packets++;
13563c838a9fSAndrew Rybchenko 	*idp = id;
1357e948693eSPhilip Paeps 
1358b7b0edd1SGeorge V. Neville-Neil 	return (0);
1359e948693eSPhilip Paeps }
1360e948693eSPhilip Paeps 
1361e948693eSPhilip Paeps static int
sfxge_tx_queue_tso(struct sfxge_txq * txq,struct mbuf * mbuf,const bus_dma_segment_t * dma_seg,int n_dma_seg,int n_extra_descs)1362e948693eSPhilip Paeps sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
13633c838a9fSAndrew Rybchenko 		   const bus_dma_segment_t *dma_seg, int n_dma_seg,
13648b447157SAndrew Rybchenko 		   int n_extra_descs)
1365e948693eSPhilip Paeps {
1366e948693eSPhilip Paeps 	struct sfxge_tso_state tso;
13673c838a9fSAndrew Rybchenko 	unsigned int id;
1368a35485aaSAndrew Rybchenko 	unsigned skipped = 0;
1369e948693eSPhilip Paeps 
13703c838a9fSAndrew Rybchenko 	tso_start(txq, &tso, dma_seg, mbuf);
1371e948693eSPhilip Paeps 
1372a35485aaSAndrew Rybchenko 	while (dma_seg->ds_len + skipped <= tso.header_len) {
1373a35485aaSAndrew Rybchenko 		skipped += dma_seg->ds_len;
1374e948693eSPhilip Paeps 		--n_dma_seg;
1375e948693eSPhilip Paeps 		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
1376e948693eSPhilip Paeps 		++dma_seg;
1377e948693eSPhilip Paeps 	}
1378cfaf34ffSAndrew Rybchenko 	tso.in_len = dma_seg->ds_len - (tso.header_len - skipped);
1379a35485aaSAndrew Rybchenko 	tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped);
1380e948693eSPhilip Paeps 
13818b447157SAndrew Rybchenko 	id = (txq->added + n_extra_descs) & txq->ptr_mask;
13823c838a9fSAndrew Rybchenko 	if (__predict_false(tso_start_new_packet(txq, &tso, &id)))
1383385b1d8eSGeorge V. Neville-Neil 		return (-1);
1384e948693eSPhilip Paeps 
1385e948693eSPhilip Paeps 	while (1) {
1386e948693eSPhilip Paeps 		tso_fill_packet_with_fragment(txq, &tso);
13873c838a9fSAndrew Rybchenko 		/* Exactly one DMA descriptor is added */
13883c838a9fSAndrew Rybchenko 		KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
13893c838a9fSAndrew Rybchenko 		id = (id + 1) & txq->ptr_mask;
1390e948693eSPhilip Paeps 
1391e948693eSPhilip Paeps 		/* Move onto the next fragment? */
1392e948693eSPhilip Paeps 		if (tso.in_len == 0) {
1393e948693eSPhilip Paeps 			--n_dma_seg;
1394e948693eSPhilip Paeps 			if (n_dma_seg == 0)
1395e948693eSPhilip Paeps 				break;
1396e948693eSPhilip Paeps 			++dma_seg;
1397e948693eSPhilip Paeps 			tso.in_len = dma_seg->ds_len;
1398e948693eSPhilip Paeps 			tso.dma_addr = dma_seg->ds_addr;
1399e948693eSPhilip Paeps 		}
1400e948693eSPhilip Paeps 
1401e948693eSPhilip Paeps 		/* End of packet? */
1402a45a0da1SAndrew Rybchenko 		if ((tso.packet_space == 0) | (tso.segs_space == 0)) {
1403a45a0da1SAndrew Rybchenko 			unsigned int n_fatso_opt_desc =
1404a45a0da1SAndrew Rybchenko 			    (tso.fw_assisted & SFXGE_FATSOV2) ?
1405a45a0da1SAndrew Rybchenko 			    EFX_TX_FATSOV2_OPT_NDESCS :
1406a45a0da1SAndrew Rybchenko 			    (tso.fw_assisted & SFXGE_FATSOV1) ? 1 : 0;
1407a45a0da1SAndrew Rybchenko 
1408e948693eSPhilip Paeps 			/* If the queue is now full due to tiny MSS,
1409e948693eSPhilip Paeps 			 * or we can't create another header, discard
1410e948693eSPhilip Paeps 			 * the remainder of the input mbuf but do not
1411e948693eSPhilip Paeps 			 * roll back the work we have done.
1412e948693eSPhilip Paeps 			 */
1413a45a0da1SAndrew Rybchenko 			if (txq->n_pend_desc + n_fatso_opt_desc +
1414a45a0da1SAndrew Rybchenko 			    1 /* header */ + n_dma_seg > txq->max_pkt_desc) {
1415e1a3d10eSAndrew Rybchenko 				txq->tso_pdrop_too_many++;
1416e948693eSPhilip Paeps 				break;
1417e1a3d10eSAndrew Rybchenko 			}
1418e948693eSPhilip Paeps 			if (__predict_false(tso_start_new_packet(txq, &tso,
14193c838a9fSAndrew Rybchenko 								 &id))) {
1420e1a3d10eSAndrew Rybchenko 				txq->tso_pdrop_no_rsrc++;
1421e948693eSPhilip Paeps 				break;
1422e1a3d10eSAndrew Rybchenko 			}
1423e948693eSPhilip Paeps 		}
1424e948693eSPhilip Paeps 	}
1425e948693eSPhilip Paeps 
1426e948693eSPhilip Paeps 	txq->tso_bursts++;
1427b7b0edd1SGeorge V. Neville-Neil 	return (id);
1428e948693eSPhilip Paeps }
1429e948693eSPhilip Paeps 
1430e948693eSPhilip Paeps static void
sfxge_tx_qunblock(struct sfxge_txq * txq)1431e948693eSPhilip Paeps sfxge_tx_qunblock(struct sfxge_txq *txq)
1432e948693eSPhilip Paeps {
1433e948693eSPhilip Paeps 	struct sfxge_softc *sc;
1434e565fa55SJohn Baldwin 	struct sfxge_evq *evq __diagused;
1435e948693eSPhilip Paeps 
1436e948693eSPhilip Paeps 	sc = txq->sc;
1437e948693eSPhilip Paeps 	evq = sc->evq[txq->evq_index];
1438e948693eSPhilip Paeps 
1439763cab71SAndrew Rybchenko 	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
1440e948693eSPhilip Paeps 
1441851128b8SAndrew Rybchenko 	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED))
1442e948693eSPhilip Paeps 		return;
1443e948693eSPhilip Paeps 
1444763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK(txq);
1445e948693eSPhilip Paeps 
1446e948693eSPhilip Paeps 	if (txq->blocked) {
1447e948693eSPhilip Paeps 		unsigned int level;
1448e948693eSPhilip Paeps 
1449e948693eSPhilip Paeps 		level = txq->added - txq->completed;
14506d73545eSAndrew Rybchenko 		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) {
14516d73545eSAndrew Rybchenko 			/* reaped must be in sync with blocked */
14526d73545eSAndrew Rybchenko 			sfxge_tx_qreap(txq);
1453e948693eSPhilip Paeps 			txq->blocked = 0;
1454e948693eSPhilip Paeps 		}
14556d73545eSAndrew Rybchenko 	}
1456e948693eSPhilip Paeps 
1457e948693eSPhilip Paeps 	sfxge_tx_qdpl_service(txq);
1458e948693eSPhilip Paeps 	/* note: lock has been dropped */
1459e948693eSPhilip Paeps }
1460e948693eSPhilip Paeps 
1461e948693eSPhilip Paeps void
sfxge_tx_qflush_done(struct sfxge_txq * txq)1462e948693eSPhilip Paeps sfxge_tx_qflush_done(struct sfxge_txq *txq)
1463e948693eSPhilip Paeps {
1464e948693eSPhilip Paeps 
1465e948693eSPhilip Paeps 	txq->flush_state = SFXGE_FLUSH_DONE;
1466e948693eSPhilip Paeps }
1467e948693eSPhilip Paeps 
1468e948693eSPhilip Paeps static void
sfxge_tx_qstop(struct sfxge_softc * sc,unsigned int index)1469e948693eSPhilip Paeps sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
1470e948693eSPhilip Paeps {
1471e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1472e948693eSPhilip Paeps 	struct sfxge_evq *evq;
1473e948693eSPhilip Paeps 	unsigned int count;
1474e948693eSPhilip Paeps 
14753c838a9fSAndrew Rybchenko 	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
14763c838a9fSAndrew Rybchenko 
1477e948693eSPhilip Paeps 	txq = sc->txq[index];
1478e948693eSPhilip Paeps 	evq = sc->evq[txq->evq_index];
1479e948693eSPhilip Paeps 
14803c838a9fSAndrew Rybchenko 	SFXGE_EVQ_LOCK(evq);
1481763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK(txq);
1482e948693eSPhilip Paeps 
1483e948693eSPhilip Paeps 	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
1484e948693eSPhilip Paeps 	    ("txq->init_state != SFXGE_TXQ_STARTED"));
1485e948693eSPhilip Paeps 
1486e948693eSPhilip Paeps 	txq->init_state = SFXGE_TXQ_INITIALIZED;
14873c838a9fSAndrew Rybchenko 
14883c838a9fSAndrew Rybchenko 	if (txq->flush_state != SFXGE_FLUSH_DONE) {
1489e948693eSPhilip Paeps 		txq->flush_state = SFXGE_FLUSH_PENDING;
1490e948693eSPhilip Paeps 
14913c838a9fSAndrew Rybchenko 		SFXGE_EVQ_UNLOCK(evq);
1492763cab71SAndrew Rybchenko 		SFXGE_TXQ_UNLOCK(txq);
1493e948693eSPhilip Paeps 
14943c838a9fSAndrew Rybchenko 		/* Flush the transmit queue. */
14953c838a9fSAndrew Rybchenko 		if (efx_tx_qflush(txq->common) != 0) {
14963c838a9fSAndrew Rybchenko 			log(LOG_ERR, "%s: Flushing Tx queue %u failed\n",
14973c838a9fSAndrew Rybchenko 			    device_get_nameunit(sc->dev), index);
14983c838a9fSAndrew Rybchenko 			txq->flush_state = SFXGE_FLUSH_DONE;
14993c838a9fSAndrew Rybchenko 		} else {
1500e948693eSPhilip Paeps 			count = 0;
1501e948693eSPhilip Paeps 			do {
1502e948693eSPhilip Paeps 				/* Spin for 100ms. */
1503e948693eSPhilip Paeps 				DELAY(100000);
1504e948693eSPhilip Paeps 				if (txq->flush_state != SFXGE_FLUSH_PENDING)
1505e948693eSPhilip Paeps 					break;
1506e948693eSPhilip Paeps 			} while (++count < 20);
15073c838a9fSAndrew Rybchenko 		}
1508763cab71SAndrew Rybchenko 		SFXGE_EVQ_LOCK(evq);
1509763cab71SAndrew Rybchenko 		SFXGE_TXQ_LOCK(txq);
1510e948693eSPhilip Paeps 
1511e948693eSPhilip Paeps 		KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
1512e948693eSPhilip Paeps 		    ("txq->flush_state == SFXGE_FLUSH_FAILED"));
1513e948693eSPhilip Paeps 
15143c838a9fSAndrew Rybchenko 		if (txq->flush_state != SFXGE_FLUSH_DONE) {
15153c838a9fSAndrew Rybchenko 			/* Flush timeout */
15163c838a9fSAndrew Rybchenko 			log(LOG_ERR, "%s: Cannot flush Tx queue %u\n",
15173c838a9fSAndrew Rybchenko 			    device_get_nameunit(sc->dev), index);
1518e948693eSPhilip Paeps 			txq->flush_state = SFXGE_FLUSH_DONE;
15193c838a9fSAndrew Rybchenko 		}
15203c838a9fSAndrew Rybchenko 	}
1521e948693eSPhilip Paeps 
1522e948693eSPhilip Paeps 	txq->blocked = 0;
1523e948693eSPhilip Paeps 	txq->pending = txq->added;
1524e948693eSPhilip Paeps 
1525cc933626SAndrew Rybchenko 	sfxge_tx_qcomplete(txq, evq);
1526e948693eSPhilip Paeps 	KASSERT(txq->completed == txq->added,
1527e948693eSPhilip Paeps 	    ("txq->completed != txq->added"));
1528e948693eSPhilip Paeps 
1529e948693eSPhilip Paeps 	sfxge_tx_qreap(txq);
1530e948693eSPhilip Paeps 	KASSERT(txq->reaped == txq->completed,
1531e948693eSPhilip Paeps 	    ("txq->reaped != txq->completed"));
1532e948693eSPhilip Paeps 
1533e948693eSPhilip Paeps 	txq->added = 0;
1534e948693eSPhilip Paeps 	txq->pending = 0;
1535e948693eSPhilip Paeps 	txq->completed = 0;
1536e948693eSPhilip Paeps 	txq->reaped = 0;
1537e948693eSPhilip Paeps 
1538e948693eSPhilip Paeps 	/* Destroy the common code transmit queue. */
1539e948693eSPhilip Paeps 	efx_tx_qdestroy(txq->common);
1540e948693eSPhilip Paeps 	txq->common = NULL;
1541e948693eSPhilip Paeps 
1542e948693eSPhilip Paeps 	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1543385b1d8eSGeorge V. Neville-Neil 	    EFX_TXQ_NBUFS(sc->txq_entries));
1544e948693eSPhilip Paeps 
15458b447157SAndrew Rybchenko 	txq->hw_cksum_flags = 0;
15468b447157SAndrew Rybchenko 
1547763cab71SAndrew Rybchenko 	SFXGE_EVQ_UNLOCK(evq);
1548763cab71SAndrew Rybchenko 	SFXGE_TXQ_UNLOCK(txq);
1549e948693eSPhilip Paeps }
1550e948693eSPhilip Paeps 
1551a45a0da1SAndrew Rybchenko /*
1552a45a0da1SAndrew Rybchenko  * Estimate maximum number of Tx descriptors required for TSO packet.
1553a45a0da1SAndrew Rybchenko  * With minimum MSS and maximum mbuf length we might need more (even
1554a45a0da1SAndrew Rybchenko  * than a ring-ful of descriptors), but this should not happen in
1555a45a0da1SAndrew Rybchenko  * practice except due to deliberate attack.  In that case we will
1556a45a0da1SAndrew Rybchenko  * truncate the output at a packet boundary.
1557a45a0da1SAndrew Rybchenko  */
1558a45a0da1SAndrew Rybchenko static unsigned int
sfxge_tx_max_pkt_desc(const struct sfxge_softc * sc,enum sfxge_txq_type type,unsigned int tso_fw_assisted)1559a45a0da1SAndrew Rybchenko sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type,
1560a45a0da1SAndrew Rybchenko 		      unsigned int tso_fw_assisted)
1561a45a0da1SAndrew Rybchenko {
1562a45a0da1SAndrew Rybchenko 	/* One descriptor for every input fragment */
1563a45a0da1SAndrew Rybchenko 	unsigned int max_descs = SFXGE_TX_MAPPING_MAX_SEG;
1564a45a0da1SAndrew Rybchenko 	unsigned int sw_tso_max_descs;
1565a45a0da1SAndrew Rybchenko 	unsigned int fa_tso_v1_max_descs = 0;
1566a45a0da1SAndrew Rybchenko 	unsigned int fa_tso_v2_max_descs = 0;
1567a45a0da1SAndrew Rybchenko 
15688b447157SAndrew Rybchenko 	/* Checksum offload Tx option descriptor may be required */
15698b447157SAndrew Rybchenko 	if (sc->txq_dynamic_cksum_toggle_supported)
15708b447157SAndrew Rybchenko 		max_descs++;
15718b447157SAndrew Rybchenko 
1572a45a0da1SAndrew Rybchenko 	/* VLAN tagging Tx option descriptor may be required */
1573a45a0da1SAndrew Rybchenko 	if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled)
1574a45a0da1SAndrew Rybchenko 		max_descs++;
1575a45a0da1SAndrew Rybchenko 
1576a45a0da1SAndrew Rybchenko 	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) {
1577a45a0da1SAndrew Rybchenko 		/*
1578a45a0da1SAndrew Rybchenko 		 * Plus header and payload descriptor for each output segment.
1579a45a0da1SAndrew Rybchenko 		 * Minus one since header fragment is already counted.
1580a45a0da1SAndrew Rybchenko 		 * Even if FATSO is used, we should be ready to fallback
1581a45a0da1SAndrew Rybchenko 		 * to do it in the driver.
1582a45a0da1SAndrew Rybchenko 		 */
1583a45a0da1SAndrew Rybchenko 		sw_tso_max_descs = SFXGE_TSO_MAX_SEGS * 2 - 1;
1584a45a0da1SAndrew Rybchenko 
1585a45a0da1SAndrew Rybchenko 		/* FW assisted TSOv1 requires one more descriptor per segment
1586a45a0da1SAndrew Rybchenko 		 * in comparison to SW TSO */
1587a45a0da1SAndrew Rybchenko 		if (tso_fw_assisted & SFXGE_FATSOV1)
1588a45a0da1SAndrew Rybchenko 			fa_tso_v1_max_descs =
1589a45a0da1SAndrew Rybchenko 			    sw_tso_max_descs + SFXGE_TSO_MAX_SEGS;
1590a45a0da1SAndrew Rybchenko 
1591a45a0da1SAndrew Rybchenko 		/* FW assisted TSOv2 requires 3 (2 FATSO plus header) extra
1592a45a0da1SAndrew Rybchenko 		 * descriptors per superframe limited by number of DMA fetches
1593a45a0da1SAndrew Rybchenko 		 * per packet. The first packet header is already counted.
1594a45a0da1SAndrew Rybchenko 		 */
1595a45a0da1SAndrew Rybchenko 		if (tso_fw_assisted & SFXGE_FATSOV2) {
1596a45a0da1SAndrew Rybchenko 			fa_tso_v2_max_descs =
1597a45a0da1SAndrew Rybchenko 			    howmany(SFXGE_TX_MAPPING_MAX_SEG,
1598a45a0da1SAndrew Rybchenko 				    EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1) *
1599a45a0da1SAndrew Rybchenko 			    (EFX_TX_FATSOV2_OPT_NDESCS + 1) - 1;
1600a45a0da1SAndrew Rybchenko 		}
1601a45a0da1SAndrew Rybchenko 
1602a45a0da1SAndrew Rybchenko 		max_descs += MAX(sw_tso_max_descs,
1603a45a0da1SAndrew Rybchenko 				 MAX(fa_tso_v1_max_descs, fa_tso_v2_max_descs));
1604a45a0da1SAndrew Rybchenko 	}
1605a45a0da1SAndrew Rybchenko 
1606a45a0da1SAndrew Rybchenko 	return (max_descs);
1607a45a0da1SAndrew Rybchenko }
1608a45a0da1SAndrew Rybchenko 
1609e948693eSPhilip Paeps static int
sfxge_tx_qstart(struct sfxge_softc * sc,unsigned int index)1610e948693eSPhilip Paeps sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
1611e948693eSPhilip Paeps {
1612e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1613e948693eSPhilip Paeps 	efsys_mem_t *esmp;
1614e948693eSPhilip Paeps 	uint16_t flags;
1615a45a0da1SAndrew Rybchenko 	unsigned int tso_fw_assisted;
16168b447157SAndrew Rybchenko 	unsigned int label;
1617e948693eSPhilip Paeps 	struct sfxge_evq *evq;
16183c838a9fSAndrew Rybchenko 	unsigned int desc_index;
1619e948693eSPhilip Paeps 	int rc;
1620e948693eSPhilip Paeps 
16213c838a9fSAndrew Rybchenko 	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
16223c838a9fSAndrew Rybchenko 
1623e948693eSPhilip Paeps 	txq = sc->txq[index];
1624e948693eSPhilip Paeps 	esmp = &txq->mem;
1625e948693eSPhilip Paeps 	evq = sc->evq[txq->evq_index];
1626e948693eSPhilip Paeps 
1627e948693eSPhilip Paeps 	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1628e948693eSPhilip Paeps 	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1629e948693eSPhilip Paeps 	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
1630e948693eSPhilip Paeps 	    ("evq->init_state != SFXGE_EVQ_STARTED"));
1631e948693eSPhilip Paeps 
1632e948693eSPhilip Paeps 	/* Program the buffer table. */
1633e948693eSPhilip Paeps 	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
1634385b1d8eSGeorge V. Neville-Neil 	    EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
1635385b1d8eSGeorge V. Neville-Neil 		return (rc);
1636e948693eSPhilip Paeps 
1637e948693eSPhilip Paeps 	/* Determine the kind of queue we are creating. */
1638a45a0da1SAndrew Rybchenko 	tso_fw_assisted = 0;
1639e948693eSPhilip Paeps 	switch (txq->type) {
1640e948693eSPhilip Paeps 	case SFXGE_TXQ_NON_CKSUM:
1641e948693eSPhilip Paeps 		flags = 0;
1642e948693eSPhilip Paeps 		break;
1643e948693eSPhilip Paeps 	case SFXGE_TXQ_IP_CKSUM:
16449dd0e15fSAndrew Rybchenko 		flags = EFX_TXQ_CKSUM_IPV4;
1645e948693eSPhilip Paeps 		break;
1646e948693eSPhilip Paeps 	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
16479dd0e15fSAndrew Rybchenko 		flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
1648a45a0da1SAndrew Rybchenko 		tso_fw_assisted = sc->tso_fw_assisted;
1649a45a0da1SAndrew Rybchenko 		if (tso_fw_assisted & SFXGE_FATSOV2)
1650a45a0da1SAndrew Rybchenko 			flags |= EFX_TXQ_FATSOV2;
1651e948693eSPhilip Paeps 		break;
1652e948693eSPhilip Paeps 	default:
1653e948693eSPhilip Paeps 		KASSERT(0, ("Impossible TX queue"));
1654e948693eSPhilip Paeps 		flags = 0;
1655e948693eSPhilip Paeps 		break;
1656e948693eSPhilip Paeps 	}
1657e948693eSPhilip Paeps 
16588b447157SAndrew Rybchenko 	label = (sc->txq_dynamic_cksum_toggle_supported) ? 0 : txq->type;
16598b447157SAndrew Rybchenko 
1660e948693eSPhilip Paeps 	/* Create the common code transmit queue. */
16618b447157SAndrew Rybchenko 	if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp,
1662385b1d8eSGeorge V. Neville-Neil 	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
1663a45a0da1SAndrew Rybchenko 	    &txq->common, &desc_index)) != 0) {
1664a45a0da1SAndrew Rybchenko 		/* Retry if no FATSOv2 resources, otherwise fail */
1665a45a0da1SAndrew Rybchenko 		if ((rc != ENOSPC) || (~flags & EFX_TXQ_FATSOV2))
1666a45a0da1SAndrew Rybchenko 			goto fail;
1667a45a0da1SAndrew Rybchenko 
1668a45a0da1SAndrew Rybchenko 		/* Looks like all FATSOv2 contexts are used */
1669a45a0da1SAndrew Rybchenko 		flags &= ~EFX_TXQ_FATSOV2;
1670a45a0da1SAndrew Rybchenko 		tso_fw_assisted &= ~SFXGE_FATSOV2;
16718b447157SAndrew Rybchenko 		if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp,
1672a45a0da1SAndrew Rybchenko 		    sc->txq_entries, txq->buf_base_id, flags, evq->common,
16733c838a9fSAndrew Rybchenko 		    &txq->common, &desc_index)) != 0)
1674e948693eSPhilip Paeps 			goto fail;
1675a45a0da1SAndrew Rybchenko 	}
1676e948693eSPhilip Paeps 
16773c838a9fSAndrew Rybchenko 	/* Initialise queue descriptor indexes */
16783c838a9fSAndrew Rybchenko 	txq->added = txq->pending = txq->completed = txq->reaped = desc_index;
16793c838a9fSAndrew Rybchenko 
1680763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK(txq);
1681e948693eSPhilip Paeps 
1682e948693eSPhilip Paeps 	/* Enable the transmit queue. */
1683e948693eSPhilip Paeps 	efx_tx_qenable(txq->common);
1684e948693eSPhilip Paeps 
1685e948693eSPhilip Paeps 	txq->init_state = SFXGE_TXQ_STARTED;
16863c838a9fSAndrew Rybchenko 	txq->flush_state = SFXGE_FLUSH_REQUIRED;
1687a45a0da1SAndrew Rybchenko 	txq->tso_fw_assisted = tso_fw_assisted;
1688a45a0da1SAndrew Rybchenko 
1689a45a0da1SAndrew Rybchenko 	txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type,
1690a45a0da1SAndrew Rybchenko 						  tso_fw_assisted);
1691e948693eSPhilip Paeps 
16921baf03a4SAndrew Rybchenko 	txq->hw_vlan_tci = 0;
16931baf03a4SAndrew Rybchenko 
16948b447157SAndrew Rybchenko 	txq->hw_cksum_flags = flags &
16958b447157SAndrew Rybchenko 			      (EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP);
16968b447157SAndrew Rybchenko 
1697763cab71SAndrew Rybchenko 	SFXGE_TXQ_UNLOCK(txq);
1698e948693eSPhilip Paeps 
1699e948693eSPhilip Paeps 	return (0);
1700e948693eSPhilip Paeps 
1701e948693eSPhilip Paeps fail:
1702e948693eSPhilip Paeps 	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1703385b1d8eSGeorge V. Neville-Neil 	    EFX_TXQ_NBUFS(sc->txq_entries));
1704385b1d8eSGeorge V. Neville-Neil 	return (rc);
1705e948693eSPhilip Paeps }
1706e948693eSPhilip Paeps 
1707e948693eSPhilip Paeps void
sfxge_tx_stop(struct sfxge_softc * sc)1708e948693eSPhilip Paeps sfxge_tx_stop(struct sfxge_softc *sc)
1709e948693eSPhilip Paeps {
1710e948693eSPhilip Paeps 	int index;
1711e948693eSPhilip Paeps 
1712e2b05fe2SAndrew Rybchenko 	index = sc->txq_count;
1713e948693eSPhilip Paeps 	while (--index >= 0)
1714e2b05fe2SAndrew Rybchenko 		sfxge_tx_qstop(sc, index);
1715e948693eSPhilip Paeps 
1716e948693eSPhilip Paeps 	/* Tear down the transmit module */
1717e948693eSPhilip Paeps 	efx_tx_fini(sc->enp);
1718e948693eSPhilip Paeps }
1719e948693eSPhilip Paeps 
1720e948693eSPhilip Paeps int
sfxge_tx_start(struct sfxge_softc * sc)1721e948693eSPhilip Paeps sfxge_tx_start(struct sfxge_softc *sc)
1722e948693eSPhilip Paeps {
1723e948693eSPhilip Paeps 	int index;
1724e948693eSPhilip Paeps 	int rc;
1725e948693eSPhilip Paeps 
1726e948693eSPhilip Paeps 	/* Initialize the common code transmit module. */
1727e948693eSPhilip Paeps 	if ((rc = efx_tx_init(sc->enp)) != 0)
1728e948693eSPhilip Paeps 		return (rc);
1729e948693eSPhilip Paeps 
1730e2b05fe2SAndrew Rybchenko 	for (index = 0; index < sc->txq_count; index++) {
1731e2b05fe2SAndrew Rybchenko 		if ((rc = sfxge_tx_qstart(sc, index)) != 0)
1732e948693eSPhilip Paeps 			goto fail;
1733e948693eSPhilip Paeps 	}
1734e948693eSPhilip Paeps 
1735e948693eSPhilip Paeps 	return (0);
1736e948693eSPhilip Paeps 
1737e948693eSPhilip Paeps fail:
1738e2b05fe2SAndrew Rybchenko 	while (--index >= 0)
1739e2b05fe2SAndrew Rybchenko 		sfxge_tx_qstop(sc, index);
1740e2b05fe2SAndrew Rybchenko 
1741e948693eSPhilip Paeps 	efx_tx_fini(sc->enp);
1742e948693eSPhilip Paeps 
1743e948693eSPhilip Paeps 	return (rc);
1744e948693eSPhilip Paeps }
1745e948693eSPhilip Paeps 
1746f6222d7bSAndrew Rybchenko static int
sfxge_txq_stat_init(struct sfxge_txq * txq,struct sysctl_oid * txq_node)1747f6222d7bSAndrew Rybchenko sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node)
1748f6222d7bSAndrew Rybchenko {
1749f6222d7bSAndrew Rybchenko 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev);
1750f6222d7bSAndrew Rybchenko 	struct sysctl_oid *stat_node;
1751f6222d7bSAndrew Rybchenko 	unsigned int id;
1752f6222d7bSAndrew Rybchenko 
1753f6222d7bSAndrew Rybchenko 	stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO,
17547029da5cSPawel Biernacki 	    "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx queue statistics");
1755f6222d7bSAndrew Rybchenko 	if (stat_node == NULL)
1756f6222d7bSAndrew Rybchenko 		return (ENOMEM);
1757f6222d7bSAndrew Rybchenko 
1758f6222d7bSAndrew Rybchenko 	for (id = 0; id < nitems(sfxge_tx_stats); id++) {
1759f6222d7bSAndrew Rybchenko 		SYSCTL_ADD_ULONG(
1760f6222d7bSAndrew Rybchenko 		    ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO,
1761f6222d7bSAndrew Rybchenko 		    sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS,
1762f6222d7bSAndrew Rybchenko 		    (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset),
1763f6222d7bSAndrew Rybchenko 		    "");
1764f6222d7bSAndrew Rybchenko 	}
1765f6222d7bSAndrew Rybchenko 
1766f6222d7bSAndrew Rybchenko 	return (0);
1767f6222d7bSAndrew Rybchenko }
1768f6222d7bSAndrew Rybchenko 
1769e948693eSPhilip Paeps /**
1770e948693eSPhilip Paeps  * Destroy a transmit queue.
1771e948693eSPhilip Paeps  */
1772e948693eSPhilip Paeps static void
sfxge_tx_qfini(struct sfxge_softc * sc,unsigned int index)1773e948693eSPhilip Paeps sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
1774e948693eSPhilip Paeps {
1775e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1776385b1d8eSGeorge V. Neville-Neil 	unsigned int nmaps;
1777e948693eSPhilip Paeps 
1778e948693eSPhilip Paeps 	txq = sc->txq[index];
1779e948693eSPhilip Paeps 
1780e948693eSPhilip Paeps 	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1781e948693eSPhilip Paeps 	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1782e948693eSPhilip Paeps 
1783e948693eSPhilip Paeps 	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
1784e948693eSPhilip Paeps 		tso_fini(txq);
1785e948693eSPhilip Paeps 
1786e948693eSPhilip Paeps 	/* Free the context arrays. */
1787e948693eSPhilip Paeps 	free(txq->pend_desc, M_SFXGE);
1788385b1d8eSGeorge V. Neville-Neil 	nmaps = sc->txq_entries;
1789b7b0edd1SGeorge V. Neville-Neil 	while (nmaps-- != 0)
1790e948693eSPhilip Paeps 		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
1791e948693eSPhilip Paeps 	free(txq->stmp, M_SFXGE);
1792e948693eSPhilip Paeps 
1793e948693eSPhilip Paeps 	/* Release DMA memory mapping. */
1794e948693eSPhilip Paeps 	sfxge_dma_free(&txq->mem);
1795e948693eSPhilip Paeps 
1796e948693eSPhilip Paeps 	sc->txq[index] = NULL;
1797e948693eSPhilip Paeps 
1798763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK_DESTROY(txq);
1799e948693eSPhilip Paeps 
1800e948693eSPhilip Paeps 	free(txq, M_SFXGE);
1801e948693eSPhilip Paeps }
1802e948693eSPhilip Paeps 
1803e948693eSPhilip Paeps static int
sfxge_tx_qinit(struct sfxge_softc * sc,unsigned int txq_index,enum sfxge_txq_type type,unsigned int evq_index)1804e948693eSPhilip Paeps sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
1805e948693eSPhilip Paeps 	       enum sfxge_txq_type type, unsigned int evq_index)
1806e948693eSPhilip Paeps {
18076a09b206SAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
1808bc85c897SGeorge V. Neville-Neil 	char name[16];
180995caaf0fSAndrew Rybchenko 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
1810bc85c897SGeorge V. Neville-Neil 	struct sysctl_oid *txq_node;
1811e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1812e948693eSPhilip Paeps 	struct sfxge_tx_dpl *stdp;
181395caaf0fSAndrew Rybchenko 	struct sysctl_oid *dpl_node;
1814e948693eSPhilip Paeps 	efsys_mem_t *esmp;
1815e948693eSPhilip Paeps 	unsigned int nmaps;
1816e948693eSPhilip Paeps 	int rc;
1817e948693eSPhilip Paeps 
1818e948693eSPhilip Paeps 	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
1819e948693eSPhilip Paeps 	txq->sc = sc;
1820385b1d8eSGeorge V. Neville-Neil 	txq->entries = sc->txq_entries;
1821385b1d8eSGeorge V. Neville-Neil 	txq->ptr_mask = txq->entries - 1;
1822e948693eSPhilip Paeps 
1823e948693eSPhilip Paeps 	sc->txq[txq_index] = txq;
1824e948693eSPhilip Paeps 	esmp = &txq->mem;
1825e948693eSPhilip Paeps 
1826e948693eSPhilip Paeps 	/* Allocate and zero DMA space for the descriptor ring. */
1827385b1d8eSGeorge V. Neville-Neil 	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
1828e948693eSPhilip Paeps 		return (rc);
1829e948693eSPhilip Paeps 
1830e948693eSPhilip Paeps 	/* Allocate buffer table entries. */
1831385b1d8eSGeorge V. Neville-Neil 	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
1832e948693eSPhilip Paeps 				 &txq->buf_base_id);
1833e948693eSPhilip Paeps 
1834e948693eSPhilip Paeps 	/* Create a DMA tag for packet mappings. */
18356a09b206SAndrew Rybchenko 	if (bus_dma_tag_create(sc->parent_dma_tag, 1,
18366a09b206SAndrew Rybchenko 	    encp->enc_tx_dma_desc_boundary,
1837fb8ccc78SMarius Strobl 	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
18386a09b206SAndrew Rybchenko 	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG,
18396a09b206SAndrew Rybchenko 	    encp->enc_tx_dma_desc_size_max, 0, NULL, NULL,
1840e948693eSPhilip Paeps 	    &txq->packet_dma_tag) != 0) {
1841e948693eSPhilip Paeps 		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
1842e948693eSPhilip Paeps 		rc = ENOMEM;
1843e948693eSPhilip Paeps 		goto fail;
1844e948693eSPhilip Paeps 	}
1845e948693eSPhilip Paeps 
1846e948693eSPhilip Paeps 	/* Allocate pending descriptor array for batching writes. */
18473c838a9fSAndrew Rybchenko 	txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries,
1848e948693eSPhilip Paeps 				M_SFXGE, M_ZERO | M_WAITOK);
1849e948693eSPhilip Paeps 
1850e948693eSPhilip Paeps 	/* Allocate and initialise mbuf DMA mapping array. */
1851385b1d8eSGeorge V. Neville-Neil 	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
1852e948693eSPhilip Paeps 	    M_SFXGE, M_ZERO | M_WAITOK);
1853385b1d8eSGeorge V. Neville-Neil 	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
1854e948693eSPhilip Paeps 		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
1855e948693eSPhilip Paeps 				       &txq->stmp[nmaps].map);
1856e948693eSPhilip Paeps 		if (rc != 0)
1857e948693eSPhilip Paeps 			goto fail2;
1858e948693eSPhilip Paeps 	}
1859e948693eSPhilip Paeps 
1860bc85c897SGeorge V. Neville-Neil 	snprintf(name, sizeof(name), "%u", txq_index);
186195caaf0fSAndrew Rybchenko 	txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node),
18627029da5cSPawel Biernacki 	    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
1863bc85c897SGeorge V. Neville-Neil 	if (txq_node == NULL) {
1864bc85c897SGeorge V. Neville-Neil 		rc = ENOMEM;
1865bc85c897SGeorge V. Neville-Neil 		goto fail_txq_node;
1866bc85c897SGeorge V. Neville-Neil 	}
1867bc85c897SGeorge V. Neville-Neil 
1868e948693eSPhilip Paeps 	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
1869e948693eSPhilip Paeps 	    (rc = tso_init(txq)) != 0)
1870e948693eSPhilip Paeps 		goto fail3;
1871e948693eSPhilip Paeps 
1872e948693eSPhilip Paeps 	/* Initialize the deferred packet list. */
1873e948693eSPhilip Paeps 	stdp = &txq->dpl;
1874060a95efSGeorge V. Neville-Neil 	stdp->std_put_max = sfxge_tx_dpl_put_max;
1875060a95efSGeorge V. Neville-Neil 	stdp->std_get_max = sfxge_tx_dpl_get_max;
187693929f25SAndrew Rybchenko 	stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max;
1877e948693eSPhilip Paeps 	stdp->std_getp = &stdp->std_get;
1878e948693eSPhilip Paeps 
187933d45dc5SAndrew Rybchenko 	SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index);
1880bc85c897SGeorge V. Neville-Neil 
188195caaf0fSAndrew Rybchenko 	dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO,
18827029da5cSPawel Biernacki 	    "dpl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
188395caaf0fSAndrew Rybchenko 	    "Deferred packet list statistics");
188495caaf0fSAndrew Rybchenko 	if (dpl_node == NULL) {
188595caaf0fSAndrew Rybchenko 		rc = ENOMEM;
188695caaf0fSAndrew Rybchenko 		goto fail_dpl_node;
188795caaf0fSAndrew Rybchenko 	}
188895caaf0fSAndrew Rybchenko 
188995caaf0fSAndrew Rybchenko 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
189095caaf0fSAndrew Rybchenko 			"get_count", CTLFLAG_RD | CTLFLAG_STATS,
1891bc85c897SGeorge V. Neville-Neil 			&stdp->std_get_count, 0, "");
189295caaf0fSAndrew Rybchenko 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
189395caaf0fSAndrew Rybchenko 			"get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS,
189493929f25SAndrew Rybchenko 			&stdp->std_get_non_tcp_count, 0, "");
189595caaf0fSAndrew Rybchenko 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
189695caaf0fSAndrew Rybchenko 			"get_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
189793929f25SAndrew Rybchenko 			&stdp->std_get_hiwat, 0, "");
189895caaf0fSAndrew Rybchenko 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
189995caaf0fSAndrew Rybchenko 			"put_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
1900bce6d281SAndrew Rybchenko 			&stdp->std_put_hiwat, 0, "");
1901e948693eSPhilip Paeps 
1902f6222d7bSAndrew Rybchenko 	rc = sfxge_txq_stat_init(txq, txq_node);
1903f6222d7bSAndrew Rybchenko 	if (rc != 0)
1904f6222d7bSAndrew Rybchenko 		goto fail_txq_stat_init;
1905f6222d7bSAndrew Rybchenko 
1906e948693eSPhilip Paeps 	txq->type = type;
1907e948693eSPhilip Paeps 	txq->evq_index = evq_index;
1908e948693eSPhilip Paeps 	txq->init_state = SFXGE_TXQ_INITIALIZED;
19093c838a9fSAndrew Rybchenko 
1910e948693eSPhilip Paeps 	return (0);
1911e948693eSPhilip Paeps 
1912f6222d7bSAndrew Rybchenko fail_txq_stat_init:
191395caaf0fSAndrew Rybchenko fail_dpl_node:
1914e948693eSPhilip Paeps fail3:
1915bc85c897SGeorge V. Neville-Neil fail_txq_node:
1916e948693eSPhilip Paeps 	free(txq->pend_desc, M_SFXGE);
1917e948693eSPhilip Paeps fail2:
1918b7b0edd1SGeorge V. Neville-Neil 	while (nmaps-- != 0)
1919e948693eSPhilip Paeps 		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
1920e948693eSPhilip Paeps 	free(txq->stmp, M_SFXGE);
1921e948693eSPhilip Paeps 	bus_dma_tag_destroy(txq->packet_dma_tag);
1922e948693eSPhilip Paeps 
1923e948693eSPhilip Paeps fail:
1924e948693eSPhilip Paeps 	sfxge_dma_free(esmp);
1925e948693eSPhilip Paeps 
1926e948693eSPhilip Paeps 	return (rc);
1927e948693eSPhilip Paeps }
1928e948693eSPhilip Paeps 
1929e948693eSPhilip Paeps static int
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)1930e948693eSPhilip Paeps sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
1931e948693eSPhilip Paeps {
1932e948693eSPhilip Paeps 	struct sfxge_softc *sc = arg1;
1933e948693eSPhilip Paeps 	unsigned int id = arg2;
1934e948693eSPhilip Paeps 	unsigned long sum;
1935e948693eSPhilip Paeps 	unsigned int index;
1936e948693eSPhilip Paeps 
1937e948693eSPhilip Paeps 	/* Sum across all TX queues */
1938e948693eSPhilip Paeps 	sum = 0;
1939e2b05fe2SAndrew Rybchenko 	for (index = 0; index < sc->txq_count; index++)
1940e948693eSPhilip Paeps 		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
1941e948693eSPhilip Paeps 					  sfxge_tx_stats[id].offset);
1942e948693eSPhilip Paeps 
1943b7b0edd1SGeorge V. Neville-Neil 	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
1944e948693eSPhilip Paeps }
1945e948693eSPhilip Paeps 
1946e948693eSPhilip Paeps static void
sfxge_tx_stat_init(struct sfxge_softc * sc)1947e948693eSPhilip Paeps sfxge_tx_stat_init(struct sfxge_softc *sc)
1948e948693eSPhilip Paeps {
1949e948693eSPhilip Paeps 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
1950e948693eSPhilip Paeps 	struct sysctl_oid_list *stat_list;
1951e948693eSPhilip Paeps 	unsigned int id;
1952e948693eSPhilip Paeps 
1953e948693eSPhilip Paeps 	stat_list = SYSCTL_CHILDREN(sc->stats_node);
1954e948693eSPhilip Paeps 
1955612d8e28SAndrew Rybchenko 	for (id = 0; id < nitems(sfxge_tx_stats); id++) {
19567029da5cSPawel Biernacki 		SYSCTL_ADD_PROC(ctx, stat_list, OID_AUTO,
19577029da5cSPawel Biernacki 		    sfxge_tx_stats[id].name,
19587029da5cSPawel Biernacki 		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
19597029da5cSPawel Biernacki 		    sc, id, sfxge_tx_stat_handler, "LU", "");
1960e948693eSPhilip Paeps 	}
1961e948693eSPhilip Paeps }
1962e948693eSPhilip Paeps 
19633d8fce27SAndrew Rybchenko uint64_t
sfxge_tx_get_drops(struct sfxge_softc * sc)19643d8fce27SAndrew Rybchenko sfxge_tx_get_drops(struct sfxge_softc *sc)
19653d8fce27SAndrew Rybchenko {
19663d8fce27SAndrew Rybchenko 	unsigned int index;
19673d8fce27SAndrew Rybchenko 	uint64_t drops = 0;
19683d8fce27SAndrew Rybchenko 	struct sfxge_txq *txq;
19693d8fce27SAndrew Rybchenko 
19703d8fce27SAndrew Rybchenko 	/* Sum across all TX queues */
19713d8fce27SAndrew Rybchenko 	for (index = 0; index < sc->txq_count; index++) {
19723d8fce27SAndrew Rybchenko 		txq = sc->txq[index];
19733d8fce27SAndrew Rybchenko 		/*
19743d8fce27SAndrew Rybchenko 		 * In theory, txq->put_overflow and txq->netdown_drops
19753d8fce27SAndrew Rybchenko 		 * should use atomic operation and other should be
19763d8fce27SAndrew Rybchenko 		 * obtained under txq lock, but it is just statistics.
19773d8fce27SAndrew Rybchenko 		 */
19783d8fce27SAndrew Rybchenko 		drops += txq->drops + txq->get_overflow +
19793d8fce27SAndrew Rybchenko 			 txq->get_non_tcp_overflow +
19803d8fce27SAndrew Rybchenko 			 txq->put_overflow + txq->netdown_drops +
19813d8fce27SAndrew Rybchenko 			 txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc;
19823d8fce27SAndrew Rybchenko 	}
19833d8fce27SAndrew Rybchenko 	return (drops);
19843d8fce27SAndrew Rybchenko }
19853d8fce27SAndrew Rybchenko 
1986e948693eSPhilip Paeps void
sfxge_tx_fini(struct sfxge_softc * sc)1987e948693eSPhilip Paeps sfxge_tx_fini(struct sfxge_softc *sc)
1988e948693eSPhilip Paeps {
1989e948693eSPhilip Paeps 	int index;
1990e948693eSPhilip Paeps 
1991e2b05fe2SAndrew Rybchenko 	index = sc->txq_count;
1992e948693eSPhilip Paeps 	while (--index >= 0)
1993e2b05fe2SAndrew Rybchenko 		sfxge_tx_qfini(sc, index);
1994e948693eSPhilip Paeps 
1995e2b05fe2SAndrew Rybchenko 	sc->txq_count = 0;
1996e948693eSPhilip Paeps }
1997e948693eSPhilip Paeps 
1998e948693eSPhilip Paeps int
sfxge_tx_init(struct sfxge_softc * sc)1999e948693eSPhilip Paeps sfxge_tx_init(struct sfxge_softc *sc)
2000e948693eSPhilip Paeps {
20013c838a9fSAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
2002e565fa55SJohn Baldwin 	struct sfxge_intr *intr __diagused;
2003e948693eSPhilip Paeps 	int index;
2004e948693eSPhilip Paeps 	int rc;
2005e948693eSPhilip Paeps 
2006e948693eSPhilip Paeps 	intr = &sc->intr;
2007e948693eSPhilip Paeps 
2008e948693eSPhilip Paeps 	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
2009e948693eSPhilip Paeps 	    ("intr->state != SFXGE_INTR_INITIALIZED"));
2010e948693eSPhilip Paeps 
20113bce7d0fSAndrew Rybchenko 	if (sfxge_tx_dpl_get_max <= 0) {
20123bce7d0fSAndrew Rybchenko 		log(LOG_ERR, "%s=%d must be greater than 0",
20133bce7d0fSAndrew Rybchenko 		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
20143bce7d0fSAndrew Rybchenko 		rc = EINVAL;
20153bce7d0fSAndrew Rybchenko 		goto fail_tx_dpl_get_max;
20163bce7d0fSAndrew Rybchenko 	}
20173bce7d0fSAndrew Rybchenko 	if (sfxge_tx_dpl_get_non_tcp_max <= 0) {
20183bce7d0fSAndrew Rybchenko 		log(LOG_ERR, "%s=%d must be greater than 0",
20193bce7d0fSAndrew Rybchenko 		    SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX,
20203bce7d0fSAndrew Rybchenko 		    sfxge_tx_dpl_get_non_tcp_max);
20213bce7d0fSAndrew Rybchenko 		rc = EINVAL;
20223bce7d0fSAndrew Rybchenko 		goto fail_tx_dpl_get_non_tcp_max;
20233bce7d0fSAndrew Rybchenko 	}
20243bce7d0fSAndrew Rybchenko 	if (sfxge_tx_dpl_put_max < 0) {
20253bce7d0fSAndrew Rybchenko 		log(LOG_ERR, "%s=%d must be greater or equal to 0",
20263bce7d0fSAndrew Rybchenko 		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
20273bce7d0fSAndrew Rybchenko 		rc = EINVAL;
20283bce7d0fSAndrew Rybchenko 		goto fail_tx_dpl_put_max;
20293bce7d0fSAndrew Rybchenko 	}
20303bce7d0fSAndrew Rybchenko 
2031e4b0a127SAndrew Rybchenko 	sc->txq_count = SFXGE_EVQ0_N_TXQ(sc) - 1 + sc->intr.n_alloc;
2032e2b05fe2SAndrew Rybchenko 
20333c838a9fSAndrew Rybchenko 	sc->tso_fw_assisted = sfxge_tso_fw_assisted;
2034a45a0da1SAndrew Rybchenko 	if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) ||
2035a45a0da1SAndrew Rybchenko 	    (!encp->enc_fw_assisted_tso_enabled))
2036a45a0da1SAndrew Rybchenko 		sc->tso_fw_assisted &= ~SFXGE_FATSOV1;
2037a45a0da1SAndrew Rybchenko 	if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO_V2) ||
2038a45a0da1SAndrew Rybchenko 	    (!encp->enc_fw_assisted_tso_v2_enabled))
2039a45a0da1SAndrew Rybchenko 		sc->tso_fw_assisted &= ~SFXGE_FATSOV2;
20403c838a9fSAndrew Rybchenko 
20417029da5cSPawel Biernacki 	sc->txqs_node = SYSCTL_ADD_NODE(device_get_sysctl_ctx(sc->dev),
20427029da5cSPawel Biernacki 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
20437029da5cSPawel Biernacki 	    "txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx queues");
2044bc85c897SGeorge V. Neville-Neil 	if (sc->txqs_node == NULL) {
2045bc85c897SGeorge V. Neville-Neil 		rc = ENOMEM;
2046bc85c897SGeorge V. Neville-Neil 		goto fail_txq_node;
2047bc85c897SGeorge V. Neville-Neil 	}
2048bc85c897SGeorge V. Neville-Neil 
2049e948693eSPhilip Paeps 	/* Initialize the transmit queues */
20508b447157SAndrew Rybchenko 	if (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) {
2051e948693eSPhilip Paeps 		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
2052e948693eSPhilip Paeps 		    SFXGE_TXQ_NON_CKSUM, 0)) != 0)
2053e948693eSPhilip Paeps 			goto fail;
2054e948693eSPhilip Paeps 
2055e948693eSPhilip Paeps 		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
2056e948693eSPhilip Paeps 		    SFXGE_TXQ_IP_CKSUM, 0)) != 0)
2057e948693eSPhilip Paeps 			goto fail2;
20588b447157SAndrew Rybchenko 	}
2059e948693eSPhilip Paeps 
2060e2b05fe2SAndrew Rybchenko 	for (index = 0;
2061e4b0a127SAndrew Rybchenko 	     index < sc->txq_count - SFXGE_EVQ0_N_TXQ(sc) + 1;
2062e2b05fe2SAndrew Rybchenko 	     index++) {
2063e4b0a127SAndrew Rybchenko 		if ((rc = sfxge_tx_qinit(sc, SFXGE_EVQ0_N_TXQ(sc) - 1 + index,
2064e948693eSPhilip Paeps 		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
2065e948693eSPhilip Paeps 			goto fail3;
2066e948693eSPhilip Paeps 	}
2067e948693eSPhilip Paeps 
2068e948693eSPhilip Paeps 	sfxge_tx_stat_init(sc);
2069e948693eSPhilip Paeps 
2070e948693eSPhilip Paeps 	return (0);
2071e948693eSPhilip Paeps 
2072e948693eSPhilip Paeps fail3:
2073e948693eSPhilip Paeps 	while (--index >= 0)
2074e948693eSPhilip Paeps 		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
2075e948693eSPhilip Paeps 
2076e2b05fe2SAndrew Rybchenko 	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
2077e2b05fe2SAndrew Rybchenko 
2078e948693eSPhilip Paeps fail2:
2079e948693eSPhilip Paeps 	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
2080e948693eSPhilip Paeps 
2081e948693eSPhilip Paeps fail:
2082bc85c897SGeorge V. Neville-Neil fail_txq_node:
2083e2b05fe2SAndrew Rybchenko 	sc->txq_count = 0;
20843bce7d0fSAndrew Rybchenko fail_tx_dpl_put_max:
20853bce7d0fSAndrew Rybchenko fail_tx_dpl_get_non_tcp_max:
20863bce7d0fSAndrew Rybchenko fail_tx_dpl_get_max:
2087e948693eSPhilip Paeps 	return (rc);
2088e948693eSPhilip Paeps }
2089