/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Theory of operation:
 *
 * Tx queues allocation and mapping
 *
 * One Tx queue with checksum offload enabled is allocated per Rx channel
 * (event queue).  Also 2 Tx queues (one without checksum offload and one
 * with IP checksum offload only) are allocated and bound to event queue 0.
 * sfxge_txq_type is used as the Tx queue label.
 *
 * So, the mapping from (event queue, label) to Tx queue index is:
 *	if event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 *	else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c.
 */
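
/* Worked example (illustrative): with the three queue types used below
 * (SFXGE_TXQ_NON_CKSUM, SFXGE_TXQ_IP_CKSUM, SFXGE_TXQ_IP_TCP_UDP_CKSUM,
 * i.e. assuming SFXGE_TXQ_NTYPES == 3), event queue 0 hosts Tx queues
 * 0, 1 and 2, while event queues 1, 2, 3, ... each host a single
 * checksum-offload Tx queue at index 3, 4, 5, ...
 */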

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"

/* Set the block level to ensure there is space to generate a
 * large number of descriptors for TSO.  With minimum MSS and
 * maximum mbuf length we might need more than a ring-ful of
 * descriptors, but this should not happen in practice except
 * due to deliberate attack.  In that case we will truncate
 * the output at a packet boundary.  Allow for a reasonable
 * minimum MSS of 512.
 */
#define	SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
#define	SFXGE_TXQ_BLOCK_LEVEL(_entries)	((_entries) - SFXGE_TSO_MAX_DESC)
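
/* A sketch of the arithmetic above (illustrative, not authoritative):
 * a maximal 65535-byte payload at the assumed minimum MSS of 512 gives
 * 65535 / 512 = 127 segments, each needing at least two descriptors
 * (one header plus one payload), hence the "* 2"; the extra
 * SFXGE_TX_MAPPING_MAX_SEG - 1 descriptors allow for payload DMA
 * segments that do not line up with packet boundaries.
 */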

#ifdef SFXGE_HAVE_MQ

#define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum number of packets (of any kind) in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \
	SFXGE_PARAM(tx_dpl_get_non_tcp_max)
static int sfxge_tx_dpl_get_non_tcp_max =
	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_non_tcp_max, 0,
	   "Maximum number of non-TCP packets in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum number of packets (of any kind) in deferred packet put-list");
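
/* Example (illustrative): the three limits above are loader tunables /
 * read-only sysctls under the hw.sfxge tree (assuming SFXGE_PARAM()
 * expands to the "hw.sfxge." prefix implied by the SYSCTL_INT(_hw_sfxge,
 * ...) declarations above), so they could be set in /boot/loader.conf as:
 *
 *	hw.sfxge.tx_dpl_get_max="1024"
 *	hw.sfxge.tx_dpl_get_non_tcp_max="64"
 *	hw.sfxge.tx_dpl_put_max="64"
 *
 * The numeric values here are placeholders, not recommended defaults.
 */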

#endif


/* Forward declarations. */
static void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg);

void
sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
{
	unsigned int completed;

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;
		unsigned int id;

		id = completed++ & txq->ptr_mask;

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				do
					m = m_free(m);
				while (m != NULL);
			} else {
				free(stmp->u.heap_buf, M_SFXGE);
			}
			stmp->flags = 0;
		}
	}
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	mb();
	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			sfxge_tx_qunblock(txq);
	}
}

#ifdef SFXGE_HAVE_MQ

static unsigned int
sfxge_is_mbuf_non_tcp(struct mbuf *mbuf)
{
	/* Absence of the TCP checksum flags does not necessarily mean
	 * that the packet is non-TCP, but the flags should be set on
	 * TCP packets if the user wants to achieve high throughput.
	 */
	return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)));
}

/*
 * Reorder the put list and append it to the get list.
 */
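/* Illustrative example: producers push at the head of the put list, so
 * after packets a, b and c are deferred the list reads c -> b -> a
 * (LIFO).  The loop below reverses it to a -> b -> c before appending
 * it to the tail of the get list, preserving submission order.
 */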
static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	uintptr_t put;
	unsigned int count;
	unsigned int non_tcp_count;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	stdp = &txq->dpl;

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);
	mbuf = (void *)put;

	if (mbuf == NULL)
		return;

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
	get_next = NULL;

	count = 0;
	non_tcp_count = 0;
	do {
		struct mbuf *put_next;

		non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf);
		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;

		count++;
	} while (mbuf != NULL);

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;
	stdp->std_get_non_tcp_count += non_tcp_count;
}

#endif /* SFXGE_HAVE_MQ */

static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	txq->reaped = txq->completed;
}

static void
sfxge_tx_qlist_post(struct sfxge_txq *txq)
{
	unsigned int old_added;
	unsigned int level;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
			  txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qpost() failed"));

	/* If efx_tx_qpost() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qpost() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/* Have we reached the block level? */
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	/* Reap, and check again */
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	txq->blocked = 1;

	/*
	 * Avoid a race with completion interrupt handling that could
	 * leave the queue blocked: the completion handler may have read
	 * txq->blocked as 0 before we set it above, in which case it
	 * will not unblock us, so re-check the fill level ourselves
	 * after a memory barrier.
	 */
	mb();
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
		mb();
		txq->blocked = 0;
	}
}

static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	bus_dmamap_t *used_map;
	bus_dmamap_t map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	unsigned int id;
	struct sfxge_tx_mapping *stmp;
	efx_buffer_t *desc;
	int n_dma_seg;
	int rc;
	int i;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (txq->init_state != SFXGE_TXQ_STARTED) {
		rc = EINTR;
		goto reject;
	}

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
	if (rc == EFBIG) {
		/* Try again. */
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
			goto reject;
		++txq->collapses;
		mbuf = new_mbuf;
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     stmp->map, mbuf,
					     dma_seg, &n_dma_seg, 0);
	}
	if (rc != 0)
		goto reject;

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
		if (rc < 0)
			goto reject_mapped;
		stmp = &txq->stmp[rc];
	} else {
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
		i = 0;
		for (;;) {
			desc = &txq->pend_desc[i];
			desc->eb_addr = dma_seg[i].ds_addr;
			desc->eb_size = dma_seg[i].ds_len;
			if (i == n_dma_seg - 1) {
				desc->eb_eop = 1;
				break;
			}
			desc->eb_eop = 0;
			i++;

			stmp->flags = 0;
			if (__predict_false(stmp ==
					    &txq->stmp[txq->ptr_mask]))
				stmp = &txq->stmp[0];
			else
				stmp++;
		}
		txq->n_pend_desc = n_dma_seg;
	}

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		map = stmp->map;
		stmp->map = *used_map;
		*used_map = map;
	}

	stmp->u.mbuf = mbuf;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	return (0);

reject_mapped:
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
	/* Drop the packet on the floor. */
	m_freem(mbuf);
	++txq->drops;

	return (rc);
}

#ifdef SFXGE_HAVE_MQ

/*
 * Drain the deferred packet list into the transmit queue.
 */
static void
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int count;
	unsigned int non_tcp_count;
	unsigned int pushed;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	sc = txq->sc;
	stdp = &txq->dpl;
	pushed = txq->added;

	prefetch_read_many(sc->enp);
	prefetch_read_many(txq->common);

	mbuf = stdp->std_get;
	count = stdp->std_get_count;
	non_tcp_count = stdp->std_get_non_tcp_count;

	if (count > stdp->std_get_hiwat)
		stdp->std_get_hiwat = count;

	while (count != 0) {
		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

		if (next != NULL)
			prefetch_read_many(next);

		/* Account for the packet before passing it on: on
		 * failure sfxge_tx_queue_mbuf() frees the mbuf, so it
		 * must not be touched afterwards.
		 */
		non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf);
		rc = sfxge_tx_queue_mbuf(txq, mbuf);
		--count;
		mbuf = next;
		if (rc != 0)
			continue;

		if (txq->blocked)
			break;

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed = txq->added;
		}
	}

	if (count == 0) {
		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		KASSERT(non_tcp_count == 0,
			("inconsistent TCP/non-TCP detection"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_get_non_tcp_count = 0;
		stdp->std_getp = &stdp->std_get;
	} else {
		stdp->std_get = mbuf;
		stdp->std_get_count = count;
		stdp->std_get_non_tcp_count = non_tcp_count;
	}

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
		("queue unblocked but count is non-zero"));
}

#define	SFXGE_TX_QDPL_PENDING(_txq)					\
	((_txq)->dpl.std_put != 0)

/*
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
static void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	do {
		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		if (!txq->blocked)
			sfxge_tx_qdpl_drain(txq);

		SFXGE_TXQ_UNLOCK(txq);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 SFXGE_TXQ_TRYLOCK(txq));
}

/*
 * Put a packet on the deferred packet list.
 *
 * If we are called with the txq lock held, we put the packet on the "get
 * list", otherwise we atomically push it on the "put list".  The swizzle
 * function takes care of ordering.
 *
 * The length of the put list is bounded by SFXGE_TX_MAX_DEFFERED.  We
 * overload the csum_data field in the mbuf to keep track of this length
 * because there is no cheap alternative to avoid races.
 */
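/* Illustrative example of the csum_data overload: with three packets
 * on the put list the head mbuf carries csum_data == 3; pushing a
 * fourth packet links it in front with csum_data == 4.  The length
 * check in the cmpset loop below therefore never has to walk the list.
 */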
static int
sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
{
	struct sfxge_tx_dpl *stdp;

	stdp = &txq->dpl;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	if (locked) {
		SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

		sfxge_tx_qdpl_swizzle(txq);

		if (stdp->std_get_count >= stdp->std_get_max) {
			txq->get_overflow++;
			return (ENOBUFS);
		}
		if (sfxge_is_mbuf_non_tcp(mbuf)) {
			if (stdp->std_get_non_tcp_count >=
			    stdp->std_get_non_tcp_max) {
				txq->get_non_tcp_overflow++;
				return (ENOBUFS);
			}
			stdp->std_get_non_tcp_count++;
		}

		*(stdp->std_getp) = mbuf;
		stdp->std_getp = &mbuf->m_nextpkt;
		stdp->std_get_count++;
	} else {
		volatile uintptr_t *putp;
		uintptr_t old;
		uintptr_t new;
		unsigned old_len;

		putp = &stdp->std_put;
		new = (uintptr_t)mbuf;

		do {
			old = *putp;
			if (old != 0) {
				struct mbuf *mp = (struct mbuf *)old;
				old_len = mp->m_pkthdr.csum_data;
			} else
				old_len = 0;
			if (old_len >= stdp->std_put_max) {
				atomic_add_long(&txq->put_overflow, 1);
				return (ENOBUFS);
			}
			mbuf->m_pkthdr.csum_data = old_len + 1;
			mbuf->m_nextpkt = (void *)old;
		} while (atomic_cmpset_ptr(putp, old, new) == 0);
	}

	return (0);
}

/*
 * Called from if_transmit - will try to grab the txq lock and, if
 * successful, enqueue the packet on the "get list"; otherwise the
 * packet is pushed onto the "put list".
 */
int
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
{
	int locked;
	int rc;

	if (!SFXGE_LINK_UP(txq->sc)) {
		rc = ENETDOWN;
		atomic_add_long(&txq->netdown_drops, 1);
		goto fail;
	}

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	locked = SFXGE_TXQ_TRYLOCK(txq);

	if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
		if (locked)
			SFXGE_TXQ_UNLOCK(txq);
		rc = ENOBUFS;
		goto fail;
	}

	/*
	 * Try to grab the lock again.
	 *
	 * If we are able to get the lock, we need to process the deferred
	 * packet list.  If we are not able to get the lock, another thread
	 * is processing the list.
	 */
	if (!locked)
		locked = SFXGE_TXQ_TRYLOCK(txq);

	if (locked) {
		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
	}

	return (0);

fail:
	m_freem(m);
	return (rc);
}

static void
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	SFXGE_TXQ_LOCK(txq);

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
		m_freem(mbuf);
	}
	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_get_non_tcp_count = 0;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_UNLOCK(txq);
}

void
sfxge_if_qflush(struct ifnet *ifp)
{
	struct sfxge_softc *sc;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc); i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
}

/*
 * TX start -- called by the stack.
 */
int
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	int rc;

	sc = (struct sfxge_softc *)ifp->if_softc;

	KASSERT(ifp->if_flags & IFF_UP, ("interface not up"));

	/* Pick the desired transmit queue. */
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
		int index = 0;

		/* check if flowid is set */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		}
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);

	return (rc);
}

#else /* !SFXGE_HAVE_MQ */

static void sfxge_if_start_locked(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;
	struct sfxge_txq *txq;
	struct mbuf *mbuf;
	unsigned int pushed[SFXGE_TXQ_NTYPES];
	unsigned int q_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (!sc->port.link_up)
		return;

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		pushed[q_index] = txq->added;
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
		if (mbuf == NULL)
			break;

		ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */

		/* Pick the desired transmit queue. */
		if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
			q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
		else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP)
			q_index = SFXGE_TXQ_IP_CKSUM;
		else
			q_index = SFXGE_TXQ_NON_CKSUM;
		txq = sc->txq[q_index];

		if (sfxge_tx_queue_mbuf(txq, mbuf) != 0)
			continue;

		if (txq->blocked) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed[q_index] = txq->added;
		}
	}

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		if (txq->added != pushed[q_index])
			efx_tx_qpush(txq->common, txq->added);
	}
}

void sfxge_if_start(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;

	SFXGE_TXQ_LOCK(sc->txq[0]);
	sfxge_if_start_locked(ifp);
	SFXGE_TXQ_UNLOCK(sc->txq[0]);
}

static void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	struct ifnet *ifp = txq->sc->ifnet;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sfxge_if_start_locked(ifp);
	SFXGE_TXQ_UNLOCK(txq);
}

#endif /* SFXGE_HAVE_MQ */

/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */
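
/* A rough sketch of the flow implemented below (illustrative only):
 *
 *	tso_start(&tso, mbuf);			parse headers, record MSS
 *	tso_start_new_packet(txq, &tso, id);	emit a header descriptor
 *	loop:
 *		tso_fill_packet_with_fragment(txq, &tso);
 *						emit payload descriptors
 *		at each MSS boundary, start a new packet with a fresh
 *		copy of the headers (seqnum and length fields patched)
 */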

struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */

	/* Input position */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	unsigned seg_size;	/* TCP segment size */
};

static const struct ip *tso_iph(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
}
static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
}
static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
{
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
}

/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define	TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
#define	TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define	TSOH_PAGE_COUNT(_txq_entries)	\
	((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
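
/* Worked example (assuming 4 KB pages and a 1024-entry Tx queue):
 * TSOH_PER_PAGE = 4096 / 128 = 32 headers per page and
 * TSOH_COUNT(1024) = 512 headers, so TSOH_PAGE_COUNT(1024) =
 * 512 / 32 = 16 preallocated header pages.
 */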

static int tso_init(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
	int i, rc;

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
				  M_SFXGE, M_WAITOK);

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
		if (rc != 0)
			goto fail;
	}

	return (0);

fail:
	while (i-- > 0)
		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;
	return (rc);
}

static void tso_fini(struct sfxge_txq *txq)
{
	int i;

	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
	}
}

static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
{
	struct ether_header *eh = mtod(mbuf, struct ether_header *);

	tso->mbuf = mbuf;

	/* Find network protocol and header */
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
	} else {
		tso->nh_off = sizeof(*eh);
	}

	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
	} else {
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
	}

	tso->header_len = tso->tcph_off + 4 * tso_tcph(tso)->th_off;
	tso->seg_size = mbuf->m_pkthdr.tso_segsz;

	tso->seqnum = ntohl(tso_tcph(tso)->th_seq);

	/* These flags must not be duplicated */
	KASSERT(!(tso_tcph(tso)->th_flags & (TH_URG | TH_SYN | TH_RST)),
		("incompatible TCP flag on TSO packet"));

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
}

/*
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end
 * of the fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
{
	efx_buffer_t *desc;
	int n;

	if (tso->in_len == 0 || tso->packet_space == 0)
		return;

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	n = min(tso->in_len, tso->packet_space);

	tso->packet_space -= n;
	tso->out_len -= n;
	tso->in_len -= n;

	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = tso->dma_addr;
	desc->eb_size = n;
	desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0;

	tso->dma_addr += n;
}

/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
				int error)
{
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
				     segs->ds_addr : 0);
}

/*
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if we failed to allocate a header.
 */
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
				unsigned int id)
{
	struct sfxge_tx_mapping *stmp = &txq->stmp[id];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	caddr_t header;
	uint64_t dma_addr;
	bus_dmamap_t map;
	efx_buffer_t *desc;
	int rc;

	/* Allocate a DMA-mapped header buffer. */
	if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
		unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
		unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;

		header = (txq->tsoh_buffer[page_index].esm_base +
			  buf_index * TSOH_STD_SIZE);
		dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
			    buf_index * TSOH_STD_SIZE);
		map = txq->tsoh_buffer[page_index].esm_map;

		stmp->flags = 0;
	} else {
		/* We cannot use bus_dmamem_alloc() as that may sleep */
		header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
		if (__predict_false(!header))
			return (ENOMEM);
		rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
				     header, tso->header_len,
				     tso_map_long_header, &dma_addr,
				     BUS_DMA_NOWAIT);
		if (__predict_false(dma_addr == 0)) {
			if (rc == 0) {
				/* Succeeded but got >1 segment */
				bus_dmamap_unload(txq->packet_dma_tag,
						  stmp->map);
				rc = EINVAL;
			}
			free(header, M_SFXGE);
			return (rc);
		}
		map = stmp->map;

		txq->tso_long_headers++;
		stmp->u.heap_buf = header;
		stmp->flags = TX_BUF_UNMAP;
	}

	tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

	/* Copy and update the headers. */
	m_copydata(tso->mbuf, 0, tso->header_len, header);

	tsoh_th->th_seq = htonl(tso->seqnum);
	tso->seqnum += tso->seg_size;
	if (tso->out_len > tso->seg_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->seg_size;
		tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->out_len;
	}

	if (tso->protocol == htons(ETHERTYPE_IP)) {
		struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
		tsoh_iph->ip_len = htons(ip_length);
		/* XXX We should increment ip_id, but FreeBSD doesn't
		 * currently allocate extra IDs for multiple segments.
		 */
	} else {
		struct ip6_hdr *tsoh_iph =
			(struct ip6_hdr *)(header + tso->nh_off);
		tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
	}

	/* Make the header visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

	tso->packet_space = tso->seg_size;
	txq->tso_packets++;

	/* Form a descriptor for this header. */
	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = dma_addr;
	desc->eb_size = tso->header_len;
	desc->eb_eop = 0;

	return (0);
}

static int
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg)
{
	struct sfxge_tso_state tso;
	unsigned int id, next_id;
	unsigned skipped = 0;

	tso_start(&tso, mbuf);

	while (dma_seg->ds_len + skipped <= tso.header_len) {
		skipped += dma_seg->ds_len;
		--n_dma_seg;
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
		++dma_seg;
	}
	/* The remaining header bytes (tso.header_len - skipped) sit at
	 * the start of this segment; the payload begins after them.
	 */
	tso.in_len = dma_seg->ds_len - (tso.header_len - skipped);
	tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped);
1068e948693eSPhilip Paeps 
1069385b1d8eSGeorge V. Neville-Neil 	id = txq->added & txq->ptr_mask;
1070e948693eSPhilip Paeps 	if (__predict_false(tso_start_new_packet(txq, &tso, id)))
1071385b1d8eSGeorge V. Neville-Neil 		return (-1);
1072e948693eSPhilip Paeps 
1073e948693eSPhilip Paeps 	while (1) {
1074385b1d8eSGeorge V. Neville-Neil 		id = (id + 1) & txq->ptr_mask;
1075e948693eSPhilip Paeps 		tso_fill_packet_with_fragment(txq, &tso);
1076e948693eSPhilip Paeps 
1077e948693eSPhilip Paeps 		/* Move onto the next fragment? */
1078e948693eSPhilip Paeps 		if (tso.in_len == 0) {
1079e948693eSPhilip Paeps 			--n_dma_seg;
1080e948693eSPhilip Paeps 			if (n_dma_seg == 0)
1081e948693eSPhilip Paeps 				break;
1082e948693eSPhilip Paeps 			++dma_seg;
1083e948693eSPhilip Paeps 			tso.in_len = dma_seg->ds_len;
1084e948693eSPhilip Paeps 			tso.dma_addr = dma_seg->ds_addr;
1085e948693eSPhilip Paeps 		}
1086e948693eSPhilip Paeps 
1087e948693eSPhilip Paeps 		/* End of packet? */
1088e948693eSPhilip Paeps 		if (tso.packet_space == 0) {
1089e948693eSPhilip Paeps 			/* If the queue is now full due to tiny MSS,
1090e948693eSPhilip Paeps 			 * or we can't create another header, discard
1091e948693eSPhilip Paeps 			 * the remainder of the input mbuf but do not
1092e948693eSPhilip Paeps 			 * roll back the work we have done.
1093e948693eSPhilip Paeps 			 */
1094e948693eSPhilip Paeps 			if (txq->n_pend_desc >
1095e1a3d10eSAndrew Rybchenko 			    SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG)) {
1096e1a3d10eSAndrew Rybchenko 				txq->tso_pdrop_too_many++;
1097e948693eSPhilip Paeps 				break;
1098e1a3d10eSAndrew Rybchenko 			}
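			/*
			 * The margin in the check above reserves room for
			 * one more header descriptor plus a worst-case
			 * payload mapping of SFXGE_TX_MAPPING_MAX_SEG
			 * segments.
			 */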
1099385b1d8eSGeorge V. Neville-Neil 			next_id = (id + 1) & txq->ptr_mask;
1100e948693eSPhilip Paeps 			if (__predict_false(tso_start_new_packet(txq, &tso,
1101e1a3d10eSAndrew Rybchenko 								 next_id))) {
1102e1a3d10eSAndrew Rybchenko 				txq->tso_pdrop_no_rsrc++;
1103e948693eSPhilip Paeps 				break;
1104e1a3d10eSAndrew Rybchenko 			}
1105e948693eSPhilip Paeps 			id = next_id;
1106e948693eSPhilip Paeps 		}
1107e948693eSPhilip Paeps 	}
1108e948693eSPhilip Paeps 
1109e948693eSPhilip Paeps 	txq->tso_bursts++;
1110b7b0edd1SGeorge V. Neville-Neil 	return (id);
1111e948693eSPhilip Paeps }
1112e948693eSPhilip Paeps 
1113e948693eSPhilip Paeps static void
1114e948693eSPhilip Paeps sfxge_tx_qunblock(struct sfxge_txq *txq)
1115e948693eSPhilip Paeps {
1116e948693eSPhilip Paeps 	struct sfxge_softc *sc;
1117e948693eSPhilip Paeps 	struct sfxge_evq *evq;
1118e948693eSPhilip Paeps 
1119e948693eSPhilip Paeps 	sc = txq->sc;
1120e948693eSPhilip Paeps 	evq = sc->evq[txq->evq_index];
1121e948693eSPhilip Paeps 
1122763cab71SAndrew Rybchenko 	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
1123e948693eSPhilip Paeps 
1124e948693eSPhilip Paeps 	if (txq->init_state != SFXGE_TXQ_STARTED)
1125e948693eSPhilip Paeps 		return;
1126e948693eSPhilip Paeps 
1127763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK(txq);
1128e948693eSPhilip Paeps 
1129e948693eSPhilip Paeps 	if (txq->blocked) {
1130e948693eSPhilip Paeps 		unsigned int level;
1131e948693eSPhilip Paeps 
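		/*
		 * Hysteresis: the queue was blocked when it ran short of
		 * descriptors.  Only clear the block once the number of
		 * descriptors still outstanding (added - completed) has
		 * fallen to the unblock threshold, so that the queue does
		 * not flap between blocked and unblocked states.
		 */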
1132e948693eSPhilip Paeps 		level = txq->added - txq->completed;
1133385b1d8eSGeorge V. Neville-Neil 		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
1134e948693eSPhilip Paeps 			txq->blocked = 0;
1135e948693eSPhilip Paeps 	}
1136e948693eSPhilip Paeps 
1137e948693eSPhilip Paeps 	sfxge_tx_qdpl_service(txq);
1138e948693eSPhilip Paeps 	/* Note: the txq lock has been dropped by sfxge_tx_qdpl_service(). */
1139e948693eSPhilip Paeps }
1140e948693eSPhilip Paeps 
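/*
 * Called from event processing when the hardware reports that a transmit
 * queue flush has completed; sfxge_tx_qstop() polls for this transition.
 */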
1141e948693eSPhilip Paeps void
1142e948693eSPhilip Paeps sfxge_tx_qflush_done(struct sfxge_txq *txq)
1143e948693eSPhilip Paeps {
1144e948693eSPhilip Paeps 
1145e948693eSPhilip Paeps 	txq->flush_state = SFXGE_FLUSH_DONE;
1146e948693eSPhilip Paeps }
1147e948693eSPhilip Paeps 
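/*
 * Stop a transmit queue: return it to the INITIALIZED state, request a
 * hardware flush, wait for the flush to complete, then complete and reap
 * all outstanding descriptors under the event queue and transmit queue
 * locks before destroying the common code queue and releasing its buffer
 * table entries.
 */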
1148e948693eSPhilip Paeps static void
1149e948693eSPhilip Paeps sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
1150e948693eSPhilip Paeps {
1151e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1152e948693eSPhilip Paeps 	struct sfxge_evq *evq;
1153e948693eSPhilip Paeps 	unsigned int count;
1154e948693eSPhilip Paeps 
1155e948693eSPhilip Paeps 	txq = sc->txq[index];
1156e948693eSPhilip Paeps 	evq = sc->evq[txq->evq_index];
1157e948693eSPhilip Paeps 
1158763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK(txq);
1159e948693eSPhilip Paeps 
1160e948693eSPhilip Paeps 	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
1161e948693eSPhilip Paeps 	    ("txq->init_state != SFXGE_TXQ_STARTED"));
1162e948693eSPhilip Paeps 
1163e948693eSPhilip Paeps 	txq->init_state = SFXGE_TXQ_INITIALIZED;
1164e948693eSPhilip Paeps 	txq->flush_state = SFXGE_FLUSH_PENDING;
1165e948693eSPhilip Paeps 
1166e948693eSPhilip Paeps 	/* Flush the transmit queue. */
1167e948693eSPhilip Paeps 	efx_tx_qflush(txq->common);
1168e948693eSPhilip Paeps 
1169763cab71SAndrew Rybchenko 	SFXGE_TXQ_UNLOCK(txq);
1170e948693eSPhilip Paeps 
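	/*
	 * Wait for the flush done event: up to 20 polls of 100ms each,
	 * i.e. roughly two seconds, before proceeding regardless; the
	 * queue is then forced to the DONE state below.
	 */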
1171e948693eSPhilip Paeps 	count = 0;
1172e948693eSPhilip Paeps 	do {
1173e948693eSPhilip Paeps 		/* Spin for 100ms. */
1174e948693eSPhilip Paeps 		DELAY(100000);
1175e948693eSPhilip Paeps 
1176e948693eSPhilip Paeps 		if (txq->flush_state != SFXGE_FLUSH_PENDING)
1177e948693eSPhilip Paeps 			break;
1178e948693eSPhilip Paeps 	} while (++count < 20);
1179e948693eSPhilip Paeps 
1180763cab71SAndrew Rybchenko 	SFXGE_EVQ_LOCK(evq);
1181763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK(txq);
1182e948693eSPhilip Paeps 
1183e948693eSPhilip Paeps 	KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
1184e948693eSPhilip Paeps 	    ("txq->flush_state == SFXGE_FLUSH_FAILED"));
1185e948693eSPhilip Paeps 
1186e948693eSPhilip Paeps 	txq->flush_state = SFXGE_FLUSH_DONE;
1187e948693eSPhilip Paeps 
1188e948693eSPhilip Paeps 	txq->blocked = 0;
1189e948693eSPhilip Paeps 	txq->pending = txq->added;
1190e948693eSPhilip Paeps 
1191cc933626SAndrew Rybchenko 	sfxge_tx_qcomplete(txq, evq);
1192e948693eSPhilip Paeps 	KASSERT(txq->completed == txq->added,
1193e948693eSPhilip Paeps 	    ("txq->completed != txq->added"));
1194e948693eSPhilip Paeps 
1195e948693eSPhilip Paeps 	sfxge_tx_qreap(txq);
1196e948693eSPhilip Paeps 	KASSERT(txq->reaped == txq->completed,
1197e948693eSPhilip Paeps 	    ("txq->reaped != txq->completed"));
1198e948693eSPhilip Paeps 
1199e948693eSPhilip Paeps 	txq->added = 0;
1200e948693eSPhilip Paeps 	txq->pending = 0;
1201e948693eSPhilip Paeps 	txq->completed = 0;
1202e948693eSPhilip Paeps 	txq->reaped = 0;
1203e948693eSPhilip Paeps 
1204e948693eSPhilip Paeps 	/* Destroy the common code transmit queue. */
1205e948693eSPhilip Paeps 	efx_tx_qdestroy(txq->common);
1206e948693eSPhilip Paeps 	txq->common = NULL;
1207e948693eSPhilip Paeps 
1208e948693eSPhilip Paeps 	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1209385b1d8eSGeorge V. Neville-Neil 	    EFX_TXQ_NBUFS(sc->txq_entries));
1210e948693eSPhilip Paeps 
1211763cab71SAndrew Rybchenko 	SFXGE_EVQ_UNLOCK(evq);
1212763cab71SAndrew Rybchenko 	SFXGE_TXQ_UNLOCK(txq);
1213e948693eSPhilip Paeps }
1214e948693eSPhilip Paeps 
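/*
 * Start a transmit queue: program its descriptor ring into the buffer
 * table, create the common code queue with checksum offload flags that
 * match the queue type, and enable it.
 */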
1215e948693eSPhilip Paeps static int
1216e948693eSPhilip Paeps sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
1217e948693eSPhilip Paeps {
1218e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1219e948693eSPhilip Paeps 	efsys_mem_t *esmp;
1220e948693eSPhilip Paeps 	uint16_t flags;
1221e948693eSPhilip Paeps 	struct sfxge_evq *evq;
1222e948693eSPhilip Paeps 	int rc;
1223e948693eSPhilip Paeps 
1224e948693eSPhilip Paeps 	txq = sc->txq[index];
1225e948693eSPhilip Paeps 	esmp = &txq->mem;
1226e948693eSPhilip Paeps 	evq = sc->evq[txq->evq_index];
1227e948693eSPhilip Paeps 
1228e948693eSPhilip Paeps 	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1229e948693eSPhilip Paeps 	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1230e948693eSPhilip Paeps 	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
1231e948693eSPhilip Paeps 	    ("evq->init_state != SFXGE_EVQ_STARTED"));
1232e948693eSPhilip Paeps 
1233e948693eSPhilip Paeps 	/* Program the buffer table. */
1234e948693eSPhilip Paeps 	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
1235385b1d8eSGeorge V. Neville-Neil 	    EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
1236385b1d8eSGeorge V. Neville-Neil 		return (rc);
1237e948693eSPhilip Paeps 
1238e948693eSPhilip Paeps 	/* Determine the kind of queue we are creating. */
1239e948693eSPhilip Paeps 	switch (txq->type) {
1240e948693eSPhilip Paeps 	case SFXGE_TXQ_NON_CKSUM:
1241e948693eSPhilip Paeps 		flags = 0;
1242e948693eSPhilip Paeps 		break;
1243e948693eSPhilip Paeps 	case SFXGE_TXQ_IP_CKSUM:
1244e948693eSPhilip Paeps 		flags = EFX_CKSUM_IPV4;
1245e948693eSPhilip Paeps 		break;
1246e948693eSPhilip Paeps 	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
1247e948693eSPhilip Paeps 		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
1248e948693eSPhilip Paeps 		break;
1249e948693eSPhilip Paeps 	default:
1250e948693eSPhilip Paeps 		KASSERT(0, ("Impossible TX queue"));
1251e948693eSPhilip Paeps 		flags = 0;
1252e948693eSPhilip Paeps 		break;
1253e948693eSPhilip Paeps 	}
1254e948693eSPhilip Paeps 
1255e948693eSPhilip Paeps 	/* Create the common code transmit queue. */
1256cf07c70dSGeorge V. Neville-Neil 	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
1257385b1d8eSGeorge V. Neville-Neil 	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
1258e948693eSPhilip Paeps 	    &txq->common)) != 0)
1259e948693eSPhilip Paeps 		goto fail;
1260e948693eSPhilip Paeps 
1261763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK(txq);
1262e948693eSPhilip Paeps 
1263e948693eSPhilip Paeps 	/* Enable the transmit queue. */
1264e948693eSPhilip Paeps 	efx_tx_qenable(txq->common);
1265e948693eSPhilip Paeps 
1266e948693eSPhilip Paeps 	txq->init_state = SFXGE_TXQ_STARTED;
1267e948693eSPhilip Paeps 
1268763cab71SAndrew Rybchenko 	SFXGE_TXQ_UNLOCK(txq);
1269e948693eSPhilip Paeps 
1270e948693eSPhilip Paeps 	return (0);
1271e948693eSPhilip Paeps 
1272e948693eSPhilip Paeps fail:
1273e948693eSPhilip Paeps 	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1274385b1d8eSGeorge V. Neville-Neil 	    EFX_TXQ_NBUFS(sc->txq_entries));
1275385b1d8eSGeorge V. Neville-Neil 	return (rc);
1276e948693eSPhilip Paeps }
1277e948693eSPhilip Paeps 
1278e948693eSPhilip Paeps void
1279e948693eSPhilip Paeps sfxge_tx_stop(struct sfxge_softc *sc)
1280e948693eSPhilip Paeps {
1281e948693eSPhilip Paeps 	int index;
1282e948693eSPhilip Paeps 
1283e948693eSPhilip Paeps 	index = SFXGE_TX_SCALE(sc);
1284e948693eSPhilip Paeps 	while (--index >= 0)
1285e948693eSPhilip Paeps 		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
1286e948693eSPhilip Paeps 
1287e948693eSPhilip Paeps 	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);
1288e948693eSPhilip Paeps 
1289e948693eSPhilip Paeps 	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
1290e948693eSPhilip Paeps 
1291e948693eSPhilip Paeps 	/* Tear down the transmit module */
1292e948693eSPhilip Paeps 	efx_tx_fini(sc->enp);
1293e948693eSPhilip Paeps }
1294e948693eSPhilip Paeps 
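/*
 * Start all transmit queues: the two event-queue-0 queues first, then one
 * TCP/UDP checksum offload queue per event queue.  On failure, any queues
 * already started are stopped in reverse order.
 */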
1295e948693eSPhilip Paeps int
1296e948693eSPhilip Paeps sfxge_tx_start(struct sfxge_softc *sc)
1297e948693eSPhilip Paeps {
1298e948693eSPhilip Paeps 	int index;
1299e948693eSPhilip Paeps 	int rc;
1300e948693eSPhilip Paeps 
1301e948693eSPhilip Paeps 	/* Initialize the common code transmit module. */
1302e948693eSPhilip Paeps 	if ((rc = efx_tx_init(sc->enp)) != 0)
1303e948693eSPhilip Paeps 		return (rc);
1304e948693eSPhilip Paeps 
1305e948693eSPhilip Paeps 	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_NON_CKSUM)) != 0)
1306e948693eSPhilip Paeps 		goto fail;
1307e948693eSPhilip Paeps 
1308e948693eSPhilip Paeps 	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_CKSUM)) != 0)
1309e948693eSPhilip Paeps 		goto fail2;
1310e948693eSPhilip Paeps 
1311e948693eSPhilip Paeps 	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
1312e948693eSPhilip Paeps 		if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM +
1313e948693eSPhilip Paeps 		    index)) != 0)
1314e948693eSPhilip Paeps 			goto fail3;
1315e948693eSPhilip Paeps 	}
1316e948693eSPhilip Paeps 
1317e948693eSPhilip Paeps 	return (0);
1318e948693eSPhilip Paeps 
1319e948693eSPhilip Paeps fail3:
1320e948693eSPhilip Paeps 	while (--index >= 0)
1321e948693eSPhilip Paeps 		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
1322e948693eSPhilip Paeps 
1323e948693eSPhilip Paeps 	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);
1324e948693eSPhilip Paeps 
1325e948693eSPhilip Paeps fail2:
1326e948693eSPhilip Paeps 	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
1327e948693eSPhilip Paeps 
1328e948693eSPhilip Paeps fail:
1329e948693eSPhilip Paeps 	efx_tx_fini(sc->enp);
1330e948693eSPhilip Paeps 
1331e948693eSPhilip Paeps 	return (rc);
1332e948693eSPhilip Paeps }
1333e948693eSPhilip Paeps 
1334e948693eSPhilip Paeps /**
1335e948693eSPhilip Paeps  * Destroy a transmit queue.
1336e948693eSPhilip Paeps  */
1337e948693eSPhilip Paeps static void
1338e948693eSPhilip Paeps sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
1339e948693eSPhilip Paeps {
1340e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1341385b1d8eSGeorge V. Neville-Neil 	unsigned int nmaps;
1342e948693eSPhilip Paeps 
1343e948693eSPhilip Paeps 	txq = sc->txq[index];
1344e948693eSPhilip Paeps 
1345e948693eSPhilip Paeps 	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1346e948693eSPhilip Paeps 	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1347e948693eSPhilip Paeps 
1348e948693eSPhilip Paeps 	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
1349e948693eSPhilip Paeps 		tso_fini(txq);
1350e948693eSPhilip Paeps 
1351e948693eSPhilip Paeps 	/* Free the context arrays. */
1352e948693eSPhilip Paeps 	free(txq->pend_desc, M_SFXGE);
1353385b1d8eSGeorge V. Neville-Neil 	nmaps = sc->txq_entries;
1354b7b0edd1SGeorge V. Neville-Neil 	while (nmaps-- != 0)
1355e948693eSPhilip Paeps 		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
1356e948693eSPhilip Paeps 	free(txq->stmp, M_SFXGE);
1357e948693eSPhilip Paeps 
1358e948693eSPhilip Paeps 	/* Release DMA memory mapping. */
1359e948693eSPhilip Paeps 	sfxge_dma_free(&txq->mem);
1360e948693eSPhilip Paeps 
1361e948693eSPhilip Paeps 	sc->txq[index] = NULL;
1362e948693eSPhilip Paeps 
1363e948693eSPhilip Paeps #ifdef SFXGE_HAVE_MQ
1364763cab71SAndrew Rybchenko 	SFXGE_TXQ_LOCK_DESTROY(txq);
1365e948693eSPhilip Paeps #endif
1366e948693eSPhilip Paeps 
1367e948693eSPhilip Paeps 	free(txq, M_SFXGE);
1368e948693eSPhilip Paeps }
1369e948693eSPhilip Paeps 
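/*
 * Construct a transmit queue: allocate DMA memory for the descriptor
 * ring, reserve buffer table entries, create a DMA tag and per-entry
 * maps for packet mappings, register sysctl nodes, and set up TSO state
 * and the deferred packet list where applicable.
 */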
1370e948693eSPhilip Paeps static int
1371e948693eSPhilip Paeps sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
1372e948693eSPhilip Paeps     enum sfxge_txq_type type, unsigned int evq_index)
1373e948693eSPhilip Paeps {
1374bc85c897SGeorge V. Neville-Neil 	char name[16];
1375bc85c897SGeorge V. Neville-Neil 	struct sysctl_oid *txq_node;
1376e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1377e948693eSPhilip Paeps 	struct sfxge_evq *evq;
1378e948693eSPhilip Paeps #ifdef SFXGE_HAVE_MQ
1379e948693eSPhilip Paeps 	struct sfxge_tx_dpl *stdp;
1380e948693eSPhilip Paeps #endif
1381e948693eSPhilip Paeps 	efsys_mem_t *esmp;
1382e948693eSPhilip Paeps 	unsigned int nmaps;
1383e948693eSPhilip Paeps 	int rc;
1384e948693eSPhilip Paeps 
1385e948693eSPhilip Paeps 	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
1386e948693eSPhilip Paeps 	txq->sc = sc;
1387385b1d8eSGeorge V. Neville-Neil 	txq->entries = sc->txq_entries;
1388385b1d8eSGeorge V. Neville-Neil 	txq->ptr_mask = txq->entries - 1;
1389e948693eSPhilip Paeps 
1390e948693eSPhilip Paeps 	sc->txq[txq_index] = txq;
1391e948693eSPhilip Paeps 	esmp = &txq->mem;
1392e948693eSPhilip Paeps 
1393e948693eSPhilip Paeps 	evq = sc->evq[evq_index];
1394e948693eSPhilip Paeps 
1395e948693eSPhilip Paeps 	/* Allocate and zero DMA space for the descriptor ring. */
1396385b1d8eSGeorge V. Neville-Neil 	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
1397e948693eSPhilip Paeps 		return (rc);
1398385b1d8eSGeorge V. Neville-Neil 	(void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries));
1399e948693eSPhilip Paeps 
1400e948693eSPhilip Paeps 	/* Allocate buffer table entries. */
1401385b1d8eSGeorge V. Neville-Neil 	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
1402e948693eSPhilip Paeps 				 &txq->buf_base_id);
1403e948693eSPhilip Paeps 
1404e948693eSPhilip Paeps 	/* Create a DMA tag for packet mappings. */
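	/*
	 * The constants below are inherited from the original driver; a
	 * plausible reading (not verified here): byte alignment, a 0x1000
	 * boundary that no segment may cross, a 46-bit device address
	 * limit, and a maximum mapping of 0x11000 bytes split across at
	 * most SFXGE_TX_MAPPING_MAX_SEG segments of 0x1000 bytes each.
	 */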
1405fb8ccc78SMarius Strobl 	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
1406fb8ccc78SMarius Strobl 	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
1407fb8ccc78SMarius Strobl 	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
1408e948693eSPhilip Paeps 	    &txq->packet_dma_tag) != 0) {
1409e948693eSPhilip Paeps 		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
1410e948693eSPhilip Paeps 		rc = ENOMEM;
1411e948693eSPhilip Paeps 		goto fail;
1412e948693eSPhilip Paeps 	}
1413e948693eSPhilip Paeps 
1414e948693eSPhilip Paeps 	/* Allocate pending descriptor array for batching writes. */
1415385b1d8eSGeorge V. Neville-Neil 	txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
1416e948693eSPhilip Paeps 				M_SFXGE, M_ZERO | M_WAITOK);
1417e948693eSPhilip Paeps 
1418e948693eSPhilip Paeps 	/* Allocate and initialize mbuf DMA mapping array. */
1419385b1d8eSGeorge V. Neville-Neil 	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
1420e948693eSPhilip Paeps 	    M_SFXGE, M_ZERO | M_WAITOK);
1421385b1d8eSGeorge V. Neville-Neil 	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
1422e948693eSPhilip Paeps 		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
1423e948693eSPhilip Paeps 				       &txq->stmp[nmaps].map);
1424e948693eSPhilip Paeps 		if (rc != 0)
1425e948693eSPhilip Paeps 			goto fail2;
1426e948693eSPhilip Paeps 	}
1427e948693eSPhilip Paeps 
1428bc85c897SGeorge V. Neville-Neil 	snprintf(name, sizeof(name), "%u", txq_index);
1429bc85c897SGeorge V. Neville-Neil 	txq_node = SYSCTL_ADD_NODE(
1430bc85c897SGeorge V. Neville-Neil 		device_get_sysctl_ctx(sc->dev),
1431bc85c897SGeorge V. Neville-Neil 		SYSCTL_CHILDREN(sc->txqs_node),
1432bc85c897SGeorge V. Neville-Neil 		OID_AUTO, name, CTLFLAG_RD, NULL, "");
1433bc85c897SGeorge V. Neville-Neil 	if (txq_node == NULL) {
1434bc85c897SGeorge V. Neville-Neil 		rc = ENOMEM;
1435bc85c897SGeorge V. Neville-Neil 		goto fail_txq_node;
1436bc85c897SGeorge V. Neville-Neil 	}
1437bc85c897SGeorge V. Neville-Neil 
1438e948693eSPhilip Paeps 	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
1439e948693eSPhilip Paeps 	    (rc = tso_init(txq)) != 0)
1440e948693eSPhilip Paeps 		goto fail3;
1441e948693eSPhilip Paeps 
1442e948693eSPhilip Paeps #ifdef SFXGE_HAVE_MQ
1443060a95efSGeorge V. Neville-Neil 	if (sfxge_tx_dpl_get_max <= 0) {
1444060a95efSGeorge V. Neville-Neil 		log(LOG_ERR, "%s=%d must be greater than 0",
1445060a95efSGeorge V. Neville-Neil 		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
1446060a95efSGeorge V. Neville-Neil 		rc = EINVAL;
1447060a95efSGeorge V. Neville-Neil 		goto fail_tx_dpl_get_max;
1448060a95efSGeorge V. Neville-Neil 	}
144993929f25SAndrew Rybchenko 	if (sfxge_tx_dpl_get_non_tcp_max <= 0) {
145093929f25SAndrew Rybchenko 		log(LOG_ERR, "%s=%d must be greater than 0",
145193929f25SAndrew Rybchenko 		    SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX,
145293929f25SAndrew Rybchenko 		    sfxge_tx_dpl_get_non_tcp_max);
145393929f25SAndrew Rybchenko 		rc = EINVAL;
145493929f25SAndrew Rybchenko 		goto fail_tx_dpl_get_max;
145593929f25SAndrew Rybchenko 	}
1456060a95efSGeorge V. Neville-Neil 	if (sfxge_tx_dpl_put_max < 0) {
1457060a95efSGeorge V. Neville-Neil 		log(LOG_ERR, "%s=%d must be greater than or equal to 0",
1458060a95efSGeorge V. Neville-Neil 		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
1459060a95efSGeorge V. Neville-Neil 		rc = EINVAL;
1460060a95efSGeorge V. Neville-Neil 		goto fail_tx_dpl_put_max;
1461060a95efSGeorge V. Neville-Neil 	}
1462060a95efSGeorge V. Neville-Neil 
1463e948693eSPhilip Paeps 	/* Initialize the deferred packet list. */
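	/*
	 * The deferred packet list has two halves: a "put" list that
	 * other CPUs append to without taking the queue lock, and a
	 * "get" list serviced by whichever thread holds the lock; the
	 * limits validated above bound each half.
	 */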
1464e948693eSPhilip Paeps 	stdp = &txq->dpl;
1465060a95efSGeorge V. Neville-Neil 	stdp->std_put_max = sfxge_tx_dpl_put_max;
1466060a95efSGeorge V. Neville-Neil 	stdp->std_get_max = sfxge_tx_dpl_get_max;
146793929f25SAndrew Rybchenko 	stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max;
1468e948693eSPhilip Paeps 	stdp->std_getp = &stdp->std_get;
1469e948693eSPhilip Paeps 
147033d45dc5SAndrew Rybchenko 	SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index);
1471bc85c897SGeorge V. Neville-Neil 
1472bc85c897SGeorge V. Neville-Neil 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
1473bc85c897SGeorge V. Neville-Neil 			SYSCTL_CHILDREN(txq_node), OID_AUTO,
1474bc85c897SGeorge V. Neville-Neil 			"dpl_get_count", CTLFLAG_RD | CTLFLAG_STATS,
1475bc85c897SGeorge V. Neville-Neil 			&stdp->std_get_count, 0, "");
147693929f25SAndrew Rybchenko 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
147793929f25SAndrew Rybchenko 			SYSCTL_CHILDREN(txq_node), OID_AUTO,
147893929f25SAndrew Rybchenko 			"dpl_get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS,
147993929f25SAndrew Rybchenko 			&stdp->std_get_non_tcp_count, 0, "");
148093929f25SAndrew Rybchenko 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
148193929f25SAndrew Rybchenko 			SYSCTL_CHILDREN(txq_node), OID_AUTO,
148293929f25SAndrew Rybchenko 			"dpl_get_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
148393929f25SAndrew Rybchenko 			&stdp->std_get_hiwat, 0, "");
1484e948693eSPhilip Paeps #endif
1485e948693eSPhilip Paeps 
1486e948693eSPhilip Paeps 	txq->type = type;
1487e948693eSPhilip Paeps 	txq->evq_index = evq_index;
1488e948693eSPhilip Paeps 	txq->txq_index = txq_index;
1489e948693eSPhilip Paeps 	txq->init_state = SFXGE_TXQ_INITIALIZED;
1490e948693eSPhilip Paeps 
1491e948693eSPhilip Paeps 	return (0);
1492e948693eSPhilip Paeps 
1493060a95efSGeorge V. Neville-Neil fail_tx_dpl_put_max:
1494060a95efSGeorge V. Neville-Neil fail_tx_dpl_get_max:
1495e948693eSPhilip Paeps fail3:
1496bc85c897SGeorge V. Neville-Neil fail_txq_node:
1497e948693eSPhilip Paeps 	free(txq->pend_desc, M_SFXGE);
1498e948693eSPhilip Paeps fail2:
1499b7b0edd1SGeorge V. Neville-Neil 	while (nmaps-- != 0)
1500e948693eSPhilip Paeps 		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
1501e948693eSPhilip Paeps 	free(txq->stmp, M_SFXGE);
1502e948693eSPhilip Paeps 	bus_dma_tag_destroy(txq->packet_dma_tag);
1503e948693eSPhilip Paeps 
1504e948693eSPhilip Paeps fail:
1505e948693eSPhilip Paeps 	sfxge_dma_free(esmp);
1506e948693eSPhilip Paeps 
1507e948693eSPhilip Paeps 	return (rc);
1508e948693eSPhilip Paeps }
1509e948693eSPhilip Paeps 
1510e948693eSPhilip Paeps static const struct {
1511e948693eSPhilip Paeps 	const char *name;
1512e948693eSPhilip Paeps 	size_t offset;
1513e948693eSPhilip Paeps } sfxge_tx_stats[] = {
1514e948693eSPhilip Paeps #define	SFXGE_TX_STAT(name, member) \
1515e948693eSPhilip Paeps 	{ #name, offsetof(struct sfxge_txq, member) }
1516e948693eSPhilip Paeps 	SFXGE_TX_STAT(tso_bursts, tso_bursts),
1517e948693eSPhilip Paeps 	SFXGE_TX_STAT(tso_packets, tso_packets),
1518e948693eSPhilip Paeps 	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
1519e1a3d10eSAndrew Rybchenko 	SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
1520e1a3d10eSAndrew Rybchenko 	SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
1521e948693eSPhilip Paeps 	SFXGE_TX_STAT(tx_collapses, collapses),
1522e948693eSPhilip Paeps 	SFXGE_TX_STAT(tx_drops, drops),
152393929f25SAndrew Rybchenko 	SFXGE_TX_STAT(tx_get_overflow, get_overflow),
152493929f25SAndrew Rybchenko 	SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow),
152593929f25SAndrew Rybchenko 	SFXGE_TX_STAT(tx_put_overflow, put_overflow),
152693929f25SAndrew Rybchenko 	SFXGE_TX_STAT(tx_netdown_drops, netdown_drops),
1527e948693eSPhilip Paeps };
1528e948693eSPhilip Paeps 
1529e948693eSPhilip Paeps static int
1530e948693eSPhilip Paeps sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
1531e948693eSPhilip Paeps {
1532e948693eSPhilip Paeps 	struct sfxge_softc *sc = arg1;
1533e948693eSPhilip Paeps 	unsigned int id = arg2;
1534e948693eSPhilip Paeps 	unsigned long sum;
1535e948693eSPhilip Paeps 	unsigned int index;
1536e948693eSPhilip Paeps 
1537e948693eSPhilip Paeps 	/* Sum across all TX queues */
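	/*
	 * The per-queue counters are read without taking the queue locks,
	 * so the total is a best-effort snapshot rather than an exact
	 * value.
	 */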
1538e948693eSPhilip Paeps 	sum = 0;
1539e948693eSPhilip Paeps 	for (index = 0;
1540e948693eSPhilip Paeps 	     index < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc);
1541e948693eSPhilip Paeps 	     index++)
1542e948693eSPhilip Paeps 		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
1543e948693eSPhilip Paeps 					  sfxge_tx_stats[id].offset);
1544e948693eSPhilip Paeps 
1545b7b0edd1SGeorge V. Neville-Neil 	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
1546e948693eSPhilip Paeps }
1547e948693eSPhilip Paeps 
1548e948693eSPhilip Paeps static void
1549e948693eSPhilip Paeps sfxge_tx_stat_init(struct sfxge_softc *sc)
1550e948693eSPhilip Paeps {
1551e948693eSPhilip Paeps 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
1552e948693eSPhilip Paeps 	struct sysctl_oid_list *stat_list;
1553e948693eSPhilip Paeps 	unsigned int id;
1554e948693eSPhilip Paeps 
1555e948693eSPhilip Paeps 	stat_list = SYSCTL_CHILDREN(sc->stats_node);
1556e948693eSPhilip Paeps 
1557e948693eSPhilip Paeps 	for (id = 0;
1558e948693eSPhilip Paeps 	     id < sizeof(sfxge_tx_stats) / sizeof(sfxge_tx_stats[0]);
1559e948693eSPhilip Paeps 	     id++) {
1560e948693eSPhilip Paeps 		SYSCTL_ADD_PROC(
1561e948693eSPhilip Paeps 			ctx, stat_list,
1562e948693eSPhilip Paeps 			OID_AUTO, sfxge_tx_stats[id].name,
1563e948693eSPhilip Paeps 			CTLTYPE_ULONG|CTLFLAG_RD,
1564e948693eSPhilip Paeps 			sc, id, sfxge_tx_stat_handler, "LU",
1565e948693eSPhilip Paeps 			"");
1566e948693eSPhilip Paeps 	}
1567e948693eSPhilip Paeps }
1568e948693eSPhilip Paeps 
1569e948693eSPhilip Paeps void
1570e948693eSPhilip Paeps sfxge_tx_fini(struct sfxge_softc *sc)
1571e948693eSPhilip Paeps {
1572e948693eSPhilip Paeps 	int index;
1573e948693eSPhilip Paeps 
1574e948693eSPhilip Paeps 	index = SFXGE_TX_SCALE(sc);
1575e948693eSPhilip Paeps 	while (--index >= 0)
1576e948693eSPhilip Paeps 		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
1577e948693eSPhilip Paeps 
1578e948693eSPhilip Paeps 	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
1579e948693eSPhilip Paeps 	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
1580e948693eSPhilip Paeps }
1581e948693eSPhilip Paeps 
1582e948693eSPhilip Paeps 
1583e948693eSPhilip Paeps int
1584e948693eSPhilip Paeps sfxge_tx_init(struct sfxge_softc *sc)
1585e948693eSPhilip Paeps {
1586e948693eSPhilip Paeps 	struct sfxge_intr *intr;
1587e948693eSPhilip Paeps 	int index;
1588e948693eSPhilip Paeps 	int rc;
1589e948693eSPhilip Paeps 
1590e948693eSPhilip Paeps 	intr = &sc->intr;
1591e948693eSPhilip Paeps 
1592e948693eSPhilip Paeps 	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
1593e948693eSPhilip Paeps 	    ("intr->state != SFXGE_INTR_INITIALIZED"));
1594e948693eSPhilip Paeps 
1595bc85c897SGeorge V. Neville-Neil 	sc->txqs_node = SYSCTL_ADD_NODE(
1596bc85c897SGeorge V. Neville-Neil 		device_get_sysctl_ctx(sc->dev),
1597bc85c897SGeorge V. Neville-Neil 		SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
1598bc85c897SGeorge V. Neville-Neil 		OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
1599bc85c897SGeorge V. Neville-Neil 	if (sc->txqs_node == NULL) {
1600bc85c897SGeorge V. Neville-Neil 		rc = ENOMEM;
1601bc85c897SGeorge V. Neville-Neil 		goto fail_txq_node;
1602bc85c897SGeorge V. Neville-Neil 	}
1603bc85c897SGeorge V. Neville-Neil 
1604e948693eSPhilip Paeps 	/* Initialize the transmit queues */
1605e948693eSPhilip Paeps 	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
1606e948693eSPhilip Paeps 	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)
1607e948693eSPhilip Paeps 		goto fail;
1608e948693eSPhilip Paeps 
1609e948693eSPhilip Paeps 	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
1610e948693eSPhilip Paeps 	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)
1611e948693eSPhilip Paeps 		goto fail2;
1612e948693eSPhilip Paeps 
1613e948693eSPhilip Paeps 	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
1614e948693eSPhilip Paeps 		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index,
1615e948693eSPhilip Paeps 		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
1616e948693eSPhilip Paeps 			goto fail3;
1617e948693eSPhilip Paeps 	}
1618e948693eSPhilip Paeps 
1619e948693eSPhilip Paeps 	sfxge_tx_stat_init(sc);
1620e948693eSPhilip Paeps 
1621e948693eSPhilip Paeps 	return (0);
1622e948693eSPhilip Paeps 
1623e948693eSPhilip Paeps fail3:
1624e948693eSPhilip Paeps 	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
1625e948693eSPhilip Paeps 
1626e948693eSPhilip Paeps 	while (--index >= 0)
1627e948693eSPhilip Paeps 		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
1628e948693eSPhilip Paeps 
1629e948693eSPhilip Paeps fail2:
1630e948693eSPhilip Paeps 	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
1631e948693eSPhilip Paeps 
1632e948693eSPhilip Paeps fail:
1633bc85c897SGeorge V. Neville-Neil fail_txq_node:
1634e948693eSPhilip Paeps 	return (rc);
1635e948693eSPhilip Paeps }
1636