xref: /freebsd/sys/dev/sfxge/sfxge_tx.c (revision c2529042)
/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Theory of operation:
 *
 * Tx queues allocation and mapping
 *
 * One Tx queue with enabled checksum offload is allocated per Rx channel
 * (event queue).  Also 2 Tx queues (one without checksum offload and one
 * with IP checksum offload only) are allocated and bound to event queue 0.
 * sfxge_txq_type is used as Tx queue label.
 *
 * So, event queue plus label mapping to Tx queue index is:
 *	if event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 *	else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c
 */

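/* Illustrative example of the mapping above, assuming the three
 * sfxge_txq_type labels NON_CKSUM, IP_CKSUM and IP_TCP_UDP_CKSUM
 * (SFXGE_TXQ_NTYPES == 3) and four event queues:
 *
 *	EvQ 0, labels 0..2  ->  TxQ 0..2
 *	EvQ 1               ->  TxQ 3 (checksum-offload queue)
 *	EvQ 2               ->  TxQ 4
 *	EvQ 3               ->  TxQ 5
 */
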
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"

/* Set the block level to ensure there is space to generate a
 * large number of descriptors for TSO.  With minimum MSS and
 * maximum mbuf length we might need more than a ring-ful of
 * descriptors, but this should not happen in practice except
 * due to deliberate attack.  In that case we will truncate
 * the output at a packet boundary.  Allow for a reasonable
 * minimum MSS of 512.
 */
#define	SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
#define	SFXGE_TXQ_BLOCK_LEVEL(_entries)	((_entries) - SFXGE_TSO_MAX_DESC)
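
/* Worked arithmetic for the bound above (an interpretation of the macro,
 * not an authoritative note): a 65535-byte TSO payload at the 512-byte
 * minimum MSS splits into at most 65535 / 512 = 127 output packets, each
 * needing a header descriptor plus at least one payload descriptor (hence
 * the "* 2"), and the mapping itself can contribute up to
 * SFXGE_TX_MAPPING_MAX_SEG - 1 additional boundary-crossing descriptors.
 */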

#ifdef SFXGE_HAVE_MQ

#define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum number of packets in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum number of packets in deferred packet put-list");

#endif


/* Forward declarations. */
static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg);

void
sfxge_tx_qcomplete(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;
	unsigned int completed;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	mtx_assert(&evq->lock, MA_OWNED);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;
		unsigned int id;

		id = completed++ & txq->ptr_mask;

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				do
					m = m_free(m);
				while (m != NULL);
			} else {
				free(stmp->u.heap_buf, M_SFXGE);
			}
			stmp->flags = 0;
		}
	}
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	mb();
	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			sfxge_tx_qunblock(txq);
	}
}

#ifdef SFXGE_HAVE_MQ

/*
 * Reorder the put list and append it to the get list.
 */
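/* Illustrative sketch, assuming producers pushed mbufs A, then B, then C
 * onto the put list (so it reads C -> B -> A, newest first): the loop
 * below reverses it to A -> B -> C and splices that onto the tail of the
 * get list, preserving overall arrival order.
 */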
static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	uintptr_t put;
	unsigned int count;

	mtx_assert(&txq->lock, MA_OWNED);

	stdp = &txq->dpl;

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);
	mbuf = (void *)put;

	if (mbuf == NULL)
		return;

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
	get_next = NULL;

	count = 0;
	do {
		struct mbuf *put_next;

		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;

		count++;
	} while (mbuf != NULL);

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;
}

#endif /* SFXGE_HAVE_MQ */

static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	txq->reaped = txq->completed;
}

static void
sfxge_tx_qlist_post(struct sfxge_txq *txq)
{
	unsigned int old_added;
	unsigned int level;
	int rc;

	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
			  txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qpost() failed"));

	/* If efx_tx_qpost() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qpost() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/* Have we reached the block level? */
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	/* Reap, and check again */
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	txq->blocked = 1;

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	mb();
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
		mb();
		txq->blocked = 0;
	}
}

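/* A note on the descriptor accounting used throughout this file (an
 * interpretation of the code, not an authoritative comment): txq->added,
 * txq->pending, txq->completed and txq->reaped are free-running counters
 * with added >= pending >= completed >= reaped; ring slots are derived by
 * masking with txq->ptr_mask and fill levels by subtraction, which is
 * safe under unsigned wrap-around.
 */
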
static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	bus_dmamap_t *used_map;
	bus_dmamap_t map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	unsigned int id;
	struct sfxge_tx_mapping *stmp;
	efx_buffer_t *desc;
	int n_dma_seg;
	int rc;
	int i;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (txq->init_state != SFXGE_TXQ_STARTED) {
		rc = EINTR;
		goto reject;
	}

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
	if (rc == EFBIG) {
		/* Try again. */
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
			goto reject;
		++txq->collapses;
		mbuf = new_mbuf;
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     stmp->map, mbuf,
					     dma_seg, &n_dma_seg, 0);
	}
	if (rc != 0)
		goto reject;

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
		if (rc < 0)
			goto reject_mapped;
		stmp = &txq->stmp[rc];
	} else {
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
		i = 0;
		for (;;) {
			desc = &txq->pend_desc[i];
			desc->eb_addr = dma_seg[i].ds_addr;
			desc->eb_size = dma_seg[i].ds_len;
			if (i == n_dma_seg - 1) {
				desc->eb_eop = 1;
				break;
			}
			desc->eb_eop = 0;
			i++;

			stmp->flags = 0;
			if (__predict_false(stmp ==
					    &txq->stmp[txq->ptr_mask]))
				stmp = &txq->stmp[0];
			else
				stmp++;
		}
		txq->n_pend_desc = n_dma_seg;
	}

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		map = stmp->map;
		stmp->map = *used_map;
		*used_map = map;
	}

	stmp->u.mbuf = mbuf;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	return (0);

reject_mapped:
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
	/* Drop the packet on the floor. */
	m_freem(mbuf);
	++txq->drops;

	return (rc);
}

#ifdef SFXGE_HAVE_MQ

/*
 * Drain the deferred packet list into the transmit queue.
 */
static void
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int count;
	unsigned int pushed;
	int rc;

	mtx_assert(&txq->lock, MA_OWNED);

	sc = txq->sc;
	stdp = &txq->dpl;
	pushed = txq->added;

	prefetch_read_many(sc->enp);
	prefetch_read_many(txq->common);

	mbuf = stdp->std_get;
	count = stdp->std_get_count;

	while (count != 0) {
		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

		if (next != NULL)
			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);
		--count;
		mbuf = next;
		if (rc != 0)
			continue;

		if (txq->blocked)
			break;

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed = txq->added;
		}
	}

	if (count == 0) {
		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_getp = &stdp->std_get;
	} else {
		stdp->std_get = mbuf;
		stdp->std_get_count = count;
	}

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
		("queue unblocked but count is non-zero"));
}
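
/* Doorbell batching note (an observation about the drain loop above, not
 * an authoritative comment): efx_tx_qpush() writes the hardware doorbell,
 * so the loop rings it at most once per SFXGE_TX_BATCH descriptors added,
 * and the final push after the loop covers any remainder smaller than a
 * full batch.
 */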

#define	SFXGE_TX_QDPL_PENDING(_txq)					\
	((_txq)->dpl.std_put != 0)

/*
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	mtx_assert(&txq->lock, MA_OWNED);

	do {
		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		if (!txq->blocked)
			sfxge_tx_qdpl_drain(txq);

		mtx_unlock(&txq->lock);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 mtx_trylock(&txq->lock));
}

/*
 * Put a packet on the deferred packet list.
 *
 * If we are called with the txq lock held, we put the packet on the "get
 * list", otherwise we atomically push it on the "put list".  The swizzle
 * function takes care of ordering.
 *
 * The length of the put list is bounded by stdp->std_put_max (tunable via
 * hw.sfxge.tx_dpl_put_max).  We overload the csum_data field in the mbuf
 * to keep track of this length because there is no cheap alternative to
 * avoid races.
 */
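/* Illustrative sketch of the lock-free push, assuming two concurrent
 * producers and an empty put list: each producer reads the current head,
 * links its mbuf's m_nextpkt to it, records the new list length in
 * csum_data, and publishes with atomic_cmpset_ptr(); a loser whose
 * compare-and-set fails simply retries with the updated head.
 */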
static inline int
sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
{
	struct sfxge_tx_dpl *stdp;

	stdp = &txq->dpl;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	if (locked) {
		mtx_assert(&txq->lock, MA_OWNED);

		sfxge_tx_qdpl_swizzle(txq);

		if (stdp->std_get_count >= stdp->std_get_max)
			return (ENOBUFS);

		*(stdp->std_getp) = mbuf;
		stdp->std_getp = &mbuf->m_nextpkt;
		stdp->std_get_count++;
	} else {
		volatile uintptr_t *putp;
		uintptr_t old;
		uintptr_t new;
		unsigned old_len;

		putp = &stdp->std_put;
		new = (uintptr_t)mbuf;

		do {
			old = *putp;
			if (old != 0) {
				struct mbuf *mp = (struct mbuf *)old;
				old_len = mp->m_pkthdr.csum_data;
			} else
				old_len = 0;
			if (old_len >= stdp->std_put_max)
				return (ENOBUFS);
			mbuf->m_pkthdr.csum_data = old_len + 1;
			mbuf->m_nextpkt = (void *)old;
		} while (atomic_cmpset_ptr(putp, old, new) == 0);
	}

	return (0);
}

/*
 * Called from if_transmit - will try to grab the txq lock and enqueue to
 * the get list if it succeeds, otherwise will atomically push onto the
 * put list.
 */
int
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
{
	int locked;
	int rc;

	if (!SFXGE_LINK_UP(txq->sc)) {
		rc = ENETDOWN;
		goto fail;
	}

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	locked = mtx_trylock(&txq->lock);

	if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
		if (locked)
			mtx_unlock(&txq->lock);
		rc = ENOBUFS;
		goto fail;
	}

	/*
	 * Try to grab the lock again.
	 *
	 * If we are able to get the lock, we need to process the deferred
	 * packet list.  If we are not able to get the lock, another thread
	 * is processing the list.
	 */
	if (!locked)
		locked = mtx_trylock(&txq->lock);

	if (locked) {
		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
	}

	return (0);

fail:
	m_freem(m);
	atomic_add_long(&txq->early_drops, 1);
	return (rc);
}

static void
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	mtx_lock(&txq->lock);

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
		m_freem(mbuf);
	}
	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_getp = &stdp->std_get;

	mtx_unlock(&txq->lock);
}

void
sfxge_if_qflush(struct ifnet *ifp)
{
	struct sfxge_softc *sc;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < SFXGE_TX_SCALE(sc); i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
}

/*
 * TX start -- called by the stack.
 */
int
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	int rc;

	sc = (struct sfxge_softc *)ifp->if_softc;

	KASSERT(ifp->if_flags & IFF_UP, ("interface not up"));

	/* Pick the desired transmit queue. */
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
		int index = 0;

		/* check if flowid is set */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		}
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);

	return (rc);
}

#else /* !SFXGE_HAVE_MQ */

static void sfxge_if_start_locked(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;
	struct sfxge_txq *txq;
	struct mbuf *mbuf;
	unsigned int pushed[SFXGE_TXQ_NTYPES];
	unsigned int q_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (!sc->port.link_up)
		return;

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		pushed[q_index] = txq->added;
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
		if (mbuf == NULL)
			break;

		ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */

		/* Pick the desired transmit queue. */
		if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
			q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
		else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP)
			q_index = SFXGE_TXQ_IP_CKSUM;
		else
			q_index = SFXGE_TXQ_NON_CKSUM;
		txq = sc->txq[q_index];

		if (sfxge_tx_queue_mbuf(txq, mbuf) != 0)
			continue;

		if (txq->blocked) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed[q_index] = txq->added;
		}
	}

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		if (txq->added != pushed[q_index])
			efx_tx_qpush(txq->common, txq->added);
	}
}

void sfxge_if_start(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;

	mtx_lock(&sc->tx_lock);
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);
}

static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	struct ifnet *ifp = sc->ifnet;

	mtx_assert(&sc->tx_lock, MA_OWNED);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);
}

#endif /* SFXGE_HAVE_MQ */

/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */

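/* Overview of the software TSO path below (a summary of the code, not an
 * authoritative design note): tso_start() parses the headers once;
 * tso_start_new_packet() then emits one header descriptor per output
 * segment from a DMA-mapped header buffer, and
 * tso_fill_packet_with_fragment() slices the payload DMA segments into
 * the payload descriptors that follow each header.
 */
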
struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */

	/* Input position */
	unsigned dma_seg_i;	/* Current DMA segment number */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	int full_packet_size;	/* Number of bytes to put in each outgoing
				 * segment */
};

static inline const struct ip *tso_iph(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
}
static inline const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
}
static inline const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
{
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
}

/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define	TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
#define	TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define	TSOH_PAGE_COUNT(_txq_entries)	\
	((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
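
/* Worked example of the sizing above, assuming 4 KiB pages and a ring of
 * 1024 entries: TSOH_PER_PAGE = 4096 / 128 = 32 header buffers per page,
 * TSOH_COUNT = 512 buffers, so TSOH_PAGE_COUNT rounds up to 16 pages.
 */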

static int tso_init(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
	int i, rc;

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
				  M_SFXGE, M_WAITOK);

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
		if (rc != 0)
			goto fail;
	}

	return (0);

fail:
	while (i-- > 0)
		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;
	return (rc);
}

static void tso_fini(struct sfxge_txq *txq)
{
	int i;

	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
	}
}

static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
{
	struct ether_header *eh = mtod(mbuf, struct ether_header *);

	tso->mbuf = mbuf;

	/* Find network protocol and header */
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
	} else {
		tso->nh_off = sizeof(*eh);
	}

	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
	} else {
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
	}

	/* We assume all headers are linear in the head mbuf */
	tso->header_len = tso->tcph_off + 4 * tso_tcph(tso)->th_off;
	KASSERT(tso->header_len <= mbuf->m_len, ("packet headers fragmented"));
	tso->full_packet_size = tso->header_len + mbuf->m_pkthdr.tso_segsz;

	tso->seqnum = ntohl(tso_tcph(tso)->th_seq);

	/* These flags must not be duplicated */
	KASSERT(!(tso_tcph(tso)->th_flags & (TH_URG | TH_SYN | TH_RST)),
		("incompatible TCP flag on TSO packet"));

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
}

/*
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end
 * of the fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
{
	efx_buffer_t *desc;
	int n;

	if (tso->in_len == 0 || tso->packet_space == 0)
		return;

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	n = min(tso->in_len, tso->packet_space);

	tso->packet_space -= n;
	tso->out_len -= n;
	tso->in_len -= n;

	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = tso->dma_addr;
	desc->eb_size = n;
	desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0;

	tso->dma_addr += n;
}

/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
				int error)
{
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
				     segs->ds_addr : 0);
}

/*
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if failed to alloc header.
 */
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
				unsigned int id)
{
	struct sfxge_tx_mapping *stmp = &txq->stmp[id];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	caddr_t header;
	uint64_t dma_addr;
	bus_dmamap_t map;
	efx_buffer_t *desc;
	int rc;

	/* Allocate a DMA-mapped header buffer. */
	if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
		unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
		unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;

		header = (txq->tsoh_buffer[page_index].esm_base +
			  buf_index * TSOH_STD_SIZE);
		dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
			    buf_index * TSOH_STD_SIZE);
		map = txq->tsoh_buffer[page_index].esm_map;

		stmp->flags = 0;
	} else {
		/* We cannot use bus_dmamem_alloc() as that may sleep */
		header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
		if (__predict_false(!header))
			return (ENOMEM);
		rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
				     header, tso->header_len,
				     tso_map_long_header, &dma_addr,
				     BUS_DMA_NOWAIT);
		if (__predict_false(dma_addr == 0)) {
			if (rc == 0) {
				/* Succeeded but got >1 segment */
				bus_dmamap_unload(txq->packet_dma_tag,
						  stmp->map);
				rc = EINVAL;
			}
			free(header, M_SFXGE);
			return (rc);
		}
		map = stmp->map;

		txq->tso_long_headers++;
		stmp->u.heap_buf = header;
		stmp->flags = TX_BUF_UNMAP;
	}

	tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

	/* Copy and update the headers. */
	memcpy(header, tso->mbuf->m_data, tso->header_len);

	tsoh_th->th_seq = htonl(tso->seqnum);
	tso->seqnum += tso->mbuf->m_pkthdr.tso_segsz;
	if (tso->out_len > tso->mbuf->m_pkthdr.tso_segsz) {
		/* This packet will not finish the TSO burst. */
		ip_length = tso->full_packet_size - tso->nh_off;
		tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->out_len;
	}

	if (tso->protocol == htons(ETHERTYPE_IP)) {
		struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
		tsoh_iph->ip_len = htons(ip_length);
		/* XXX We should increment ip_id, but FreeBSD doesn't
		 * currently allocate extra IDs for multiple segments.
		 */
	} else {
		struct ip6_hdr *tsoh_iph =
			(struct ip6_hdr *)(header + tso->nh_off);
		tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
	}

	/* Make the header visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

	tso->packet_space = tso->mbuf->m_pkthdr.tso_segsz;
	txq->tso_packets++;

	/* Form a descriptor for this header. */
	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = dma_addr;
	desc->eb_size = tso->header_len;
	desc->eb_eop = 0;

	return (0);
}

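/* Return-value note for sfxge_tx_queue_tso() below (inferred from its use
 * in sfxge_tx_queue_mbuf(), not from an authoritative comment): on success
 * it returns the ring index of the last descriptor consumed, which the
 * caller uses to pick the stmp entry that takes ownership of the mbuf and
 * its DMA map; on failure it returns -1.
 */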
static int
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg)
{
	struct sfxge_tso_state tso;
	unsigned int id, next_id;

	tso_start(&tso, mbuf);

	/* Grab the first payload fragment. */
	if (dma_seg->ds_len == tso.header_len) {
		--n_dma_seg;
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
		++dma_seg;
		tso.in_len = dma_seg->ds_len;
		tso.dma_addr = dma_seg->ds_addr;
	} else {
		tso.in_len = dma_seg->ds_len - tso.header_len;
		tso.dma_addr = dma_seg->ds_addr + tso.header_len;
	}

	id = txq->added & txq->ptr_mask;
	if (__predict_false(tso_start_new_packet(txq, &tso, id)))
		return (-1);

	while (1) {
		id = (id + 1) & txq->ptr_mask;
		tso_fill_packet_with_fragment(txq, &tso);

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			--n_dma_seg;
			if (n_dma_seg == 0)
				break;
			++dma_seg;
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;
		}

		/* End of packet? */
		if (tso.packet_space == 0) {
			/* If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			 */
			if (txq->n_pend_desc >
			    SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
				break;
			next_id = (id + 1) & txq->ptr_mask;
			if (__predict_false(tso_start_new_packet(txq, &tso,
								 next_id)))
				break;
			id = next_id;
		}
	}

	txq->tso_bursts++;
	return (id);
}

static void
sfxge_tx_qunblock(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	mtx_assert(&evq->lock, MA_OWNED);

	if (txq->init_state != SFXGE_TXQ_STARTED)
		return;

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			txq->blocked = 0;
	}

	sfxge_tx_qdpl_service(txq);
	/* note: lock has been dropped */
}

void
sfxge_tx_qflush_done(struct sfxge_txq *txq)
{

	txq->flush_state = SFXGE_FLUSH_DONE;
}

static void
sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	unsigned int count;

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
	    ("txq->init_state != SFXGE_TXQ_STARTED"));

	txq->init_state = SFXGE_TXQ_INITIALIZED;
	txq->flush_state = SFXGE_FLUSH_PENDING;

	/* Flush the transmit queue. */
	efx_tx_qflush(txq->common);

	mtx_unlock(SFXGE_TXQ_LOCK(txq));

1134e948693eSPhilip Paeps 	count = 0;
1135e948693eSPhilip Paeps 	do {
1136e948693eSPhilip Paeps 		/* Spin for 100ms. */
1137e948693eSPhilip Paeps 		DELAY(100000);
1138e948693eSPhilip Paeps 
1139e948693eSPhilip Paeps 		if (txq->flush_state != SFXGE_FLUSH_PENDING)
1140e948693eSPhilip Paeps 			break;
1141e948693eSPhilip Paeps 	} while (++count < 20);
1142e948693eSPhilip Paeps 
1143e948693eSPhilip Paeps 	mtx_lock(&evq->lock);
1144e948693eSPhilip Paeps 	mtx_lock(SFXGE_TXQ_LOCK(txq));
1145e948693eSPhilip Paeps 
1146e948693eSPhilip Paeps 	KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
1147e948693eSPhilip Paeps 	    ("txq->flush_state == SFXGE_FLUSH_FAILED"));
1148e948693eSPhilip Paeps 
1149e948693eSPhilip Paeps 	txq->flush_state = SFXGE_FLUSH_DONE;
1150e948693eSPhilip Paeps 
1151e948693eSPhilip Paeps 	txq->blocked = 0;
1152e948693eSPhilip Paeps 	txq->pending = txq->added;
1153e948693eSPhilip Paeps 
1154e948693eSPhilip Paeps 	sfxge_tx_qcomplete(txq);
1155e948693eSPhilip Paeps 	KASSERT(txq->completed == txq->added,
1156e948693eSPhilip Paeps 	    ("txq->completed != txq->added"));
1157e948693eSPhilip Paeps 
1158e948693eSPhilip Paeps 	sfxge_tx_qreap(txq);
1159e948693eSPhilip Paeps 	KASSERT(txq->reaped == txq->completed,
1160e948693eSPhilip Paeps 	    ("txq->reaped != txq->completed"));
1161e948693eSPhilip Paeps 
1162e948693eSPhilip Paeps 	txq->added = 0;
1163e948693eSPhilip Paeps 	txq->pending = 0;
1164e948693eSPhilip Paeps 	txq->completed = 0;
1165e948693eSPhilip Paeps 	txq->reaped = 0;
1166e948693eSPhilip Paeps 
1167e948693eSPhilip Paeps 	/* Destroy the common code transmit queue. */
1168e948693eSPhilip Paeps 	efx_tx_qdestroy(txq->common);
1169e948693eSPhilip Paeps 	txq->common = NULL;
1170e948693eSPhilip Paeps 
1171e948693eSPhilip Paeps 	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1172385b1d8eSGeorge V. Neville-Neil 	    EFX_TXQ_NBUFS(sc->txq_entries));
1173e948693eSPhilip Paeps 
1174e948693eSPhilip Paeps 	mtx_unlock(&evq->lock);
1175e948693eSPhilip Paeps 	mtx_unlock(SFXGE_TXQ_LOCK(txq));
1176e948693eSPhilip Paeps }
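
/*
 * Illustrative timing, derived from the loop above: the flush is polled
 * in 100 ms steps (DELAY() takes microseconds) for at most 20
 * iterations, i.e. roughly a 2 second ceiling before the queue is torn
 * down regardless of whether the flush-done event ever arrived.
 */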
1177e948693eSPhilip Paeps 
1178e948693eSPhilip Paeps static int
1179e948693eSPhilip Paeps sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
1180e948693eSPhilip Paeps {
1181e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1182e948693eSPhilip Paeps 	efsys_mem_t *esmp;
1183e948693eSPhilip Paeps 	uint16_t flags;
1184e948693eSPhilip Paeps 	struct sfxge_evq *evq;
1185e948693eSPhilip Paeps 	int rc;
1186e948693eSPhilip Paeps 
1187e948693eSPhilip Paeps 	txq = sc->txq[index];
1188e948693eSPhilip Paeps 	esmp = &txq->mem;
1189e948693eSPhilip Paeps 	evq = sc->evq[txq->evq_index];
1190e948693eSPhilip Paeps 
1191e948693eSPhilip Paeps 	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1192e948693eSPhilip Paeps 	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1193e948693eSPhilip Paeps 	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
1194e948693eSPhilip Paeps 	    ("evq->init_state != SFXGE_EVQ_STARTED"));
1195e948693eSPhilip Paeps 
1196e948693eSPhilip Paeps 	/* Program the buffer table. */
1197e948693eSPhilip Paeps 	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
1198385b1d8eSGeorge V. Neville-Neil 	    EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
1199385b1d8eSGeorge V. Neville-Neil 		return (rc);
1200e948693eSPhilip Paeps 
1201e948693eSPhilip Paeps 	/* Determine the kind of queue we are creating. */
1202e948693eSPhilip Paeps 	switch (txq->type) {
1203e948693eSPhilip Paeps 	case SFXGE_TXQ_NON_CKSUM:
1204e948693eSPhilip Paeps 		flags = 0;
1205e948693eSPhilip Paeps 		break;
1206e948693eSPhilip Paeps 	case SFXGE_TXQ_IP_CKSUM:
1207e948693eSPhilip Paeps 		flags = EFX_CKSUM_IPV4;
1208e948693eSPhilip Paeps 		break;
1209e948693eSPhilip Paeps 	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
1210e948693eSPhilip Paeps 		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
1211e948693eSPhilip Paeps 		break;
1212e948693eSPhilip Paeps 	default:
1213e948693eSPhilip Paeps 		KASSERT(0, ("Impossible TX queue"));
1214e948693eSPhilip Paeps 		flags = 0;
1215e948693eSPhilip Paeps 		break;
1216e948693eSPhilip Paeps 	}
1217e948693eSPhilip Paeps 
1218e948693eSPhilip Paeps 	/* Create the common code transmit queue. */
1219cf07c70dSGeorge V. Neville-Neil 	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
1220385b1d8eSGeorge V. Neville-Neil 	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
1221e948693eSPhilip Paeps 	    &txq->common)) != 0)
1222e948693eSPhilip Paeps 		goto fail;
1223e948693eSPhilip Paeps 
1224e948693eSPhilip Paeps 	mtx_lock(SFXGE_TXQ_LOCK(txq));
1225e948693eSPhilip Paeps 
1226e948693eSPhilip Paeps 	/* Enable the transmit queue. */
1227e948693eSPhilip Paeps 	efx_tx_qenable(txq->common);
1228e948693eSPhilip Paeps 
1229e948693eSPhilip Paeps 	txq->init_state = SFXGE_TXQ_STARTED;
1230e948693eSPhilip Paeps 
1231e948693eSPhilip Paeps 	mtx_unlock(SFXGE_TXQ_LOCK(txq));
1232e948693eSPhilip Paeps 
1233e948693eSPhilip Paeps 	return (0);
1234e948693eSPhilip Paeps 
1235e948693eSPhilip Paeps fail:
1236e948693eSPhilip Paeps 	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1237385b1d8eSGeorge V. Neville-Neil 	    EFX_TXQ_NBUFS(sc->txq_entries));
1238385b1d8eSGeorge V. Neville-Neil 	return (rc);
1239e948693eSPhilip Paeps }
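
/*
 * Summary of the queue-type to offload-flag mapping selected above:
 *
 *	SFXGE_TXQ_NON_CKSUM		0
 *	SFXGE_TXQ_IP_CKSUM		EFX_CKSUM_IPV4
 *	SFXGE_TXQ_IP_TCP_UDP_CKSUM	EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP
 */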
1240e948693eSPhilip Paeps 
1241e948693eSPhilip Paeps void
1242e948693eSPhilip Paeps sfxge_tx_stop(struct sfxge_softc *sc)
1243e948693eSPhilip Paeps {
1245e948693eSPhilip Paeps 	int index;
1246e948693eSPhilip Paeps 
1247e948693eSPhilip Paeps 	index = SFXGE_TX_SCALE(sc);
1248e948693eSPhilip Paeps 	while (--index >= 0)
1249e948693eSPhilip Paeps 		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
1250e948693eSPhilip Paeps 
1251e948693eSPhilip Paeps 	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);
1254e948693eSPhilip Paeps 	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
1255e948693eSPhilip Paeps 
1256e948693eSPhilip Paeps 	/* Tear down the transmit module */
1257e948693eSPhilip Paeps 	efx_tx_fini(sc->enp);
1258e948693eSPhilip Paeps }
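
/*
 * Note: sfxge_tx_stop() stops queues in the reverse of the order
 * sfxge_tx_start() brings them up, and only calls efx_tx_fini() once
 * every queue has been flushed and destroyed.
 */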
1259e948693eSPhilip Paeps 
1260e948693eSPhilip Paeps int
1261e948693eSPhilip Paeps sfxge_tx_start(struct sfxge_softc *sc)
1262e948693eSPhilip Paeps {
1263e948693eSPhilip Paeps 	int index;
1264e948693eSPhilip Paeps 	int rc;
1265e948693eSPhilip Paeps 
1266e948693eSPhilip Paeps 	/* Initialize the common code transmit module. */
1267e948693eSPhilip Paeps 	if ((rc = efx_tx_init(sc->enp)) != 0)
1268e948693eSPhilip Paeps 		return (rc);
1269e948693eSPhilip Paeps 
1270e948693eSPhilip Paeps 	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_NON_CKSUM)) != 0)
1271e948693eSPhilip Paeps 		goto fail;
1272e948693eSPhilip Paeps 
1273e948693eSPhilip Paeps 	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_CKSUM)) != 0)
1274e948693eSPhilip Paeps 		goto fail2;
1275e948693eSPhilip Paeps 
1276e948693eSPhilip Paeps 	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
1277e948693eSPhilip Paeps 		if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM +
1278e948693eSPhilip Paeps 		    index)) != 0)
1279e948693eSPhilip Paeps 			goto fail3;
1280e948693eSPhilip Paeps 	}
1281e948693eSPhilip Paeps 
1282e948693eSPhilip Paeps 	return (0);
1283e948693eSPhilip Paeps 
1284e948693eSPhilip Paeps fail3:
1285e948693eSPhilip Paeps 	while (--index >= 0)
1286e948693eSPhilip Paeps 		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
1287e948693eSPhilip Paeps 
1288e948693eSPhilip Paeps 	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);
1289e948693eSPhilip Paeps 
1290e948693eSPhilip Paeps fail2:
1291e948693eSPhilip Paeps 	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
1292e948693eSPhilip Paeps 
1293e948693eSPhilip Paeps fail:
1294e948693eSPhilip Paeps 	efx_tx_fini(sc->enp);
1295e948693eSPhilip Paeps 
1296e948693eSPhilip Paeps 	return (rc);
1297e948693eSPhilip Paeps }
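
/*
 * Illustrative example (hypothetical scale value): with SFXGE_TX_SCALE()
 * returning 4, sfxge_tx_start() brings up six queues in order:
 *
 *	txq[SFXGE_TXQ_NON_CKSUM]			index 0
 *	txq[SFXGE_TXQ_IP_CKSUM]				index 1
 *	txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + 0..3]		indices 2..5
 *
 * and the fail3/fail2/fail labels unwind exactly the queues already
 * started, in reverse order.
 */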
1298e948693eSPhilip Paeps 
1299e948693eSPhilip Paeps /*
1300e948693eSPhilip Paeps  * Destroy a transmit queue.
1301e948693eSPhilip Paeps  */
1302e948693eSPhilip Paeps static void
1303e948693eSPhilip Paeps sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
1304e948693eSPhilip Paeps {
1305e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1306385b1d8eSGeorge V. Neville-Neil 	unsigned int nmaps;
1307e948693eSPhilip Paeps 
1308e948693eSPhilip Paeps 	txq = sc->txq[index];
1309e948693eSPhilip Paeps 
1310e948693eSPhilip Paeps 	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1311e948693eSPhilip Paeps 	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1312e948693eSPhilip Paeps 
1313e948693eSPhilip Paeps 	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
1314e948693eSPhilip Paeps 		tso_fini(txq);
1315e948693eSPhilip Paeps 
1316e948693eSPhilip Paeps 	/* Free the context arrays. */
1317e948693eSPhilip Paeps 	free(txq->pend_desc, M_SFXGE);
1318385b1d8eSGeorge V. Neville-Neil 	nmaps = sc->txq_entries;
1319b7b0edd1SGeorge V. Neville-Neil 	while (nmaps-- != 0)
1320e948693eSPhilip Paeps 		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
1321e948693eSPhilip Paeps 	free(txq->stmp, M_SFXGE);
1322e948693eSPhilip Paeps 
1323e948693eSPhilip Paeps 	/* Release DMA memory mapping. */
1324e948693eSPhilip Paeps 	sfxge_dma_free(&txq->mem);
1325e948693eSPhilip Paeps 
1326e948693eSPhilip Paeps 	sc->txq[index] = NULL;
1327e948693eSPhilip Paeps 
1328e948693eSPhilip Paeps #ifdef SFXGE_HAVE_MQ
1329e948693eSPhilip Paeps 	mtx_destroy(&txq->lock);
1330e948693eSPhilip Paeps #endif
1331e948693eSPhilip Paeps 
1332e948693eSPhilip Paeps 	free(txq, M_SFXGE);
1333e948693eSPhilip Paeps }
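
/*
 * Note: teardown above releases resources in roughly the reverse order
 * of sfxge_tx_qinit(): TSO state, the pending-descriptor array, one DMA
 * map per ring entry, the map array, the descriptor-ring DMA memory,
 * the queue lock and finally the txq structure itself.
 */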
1334e948693eSPhilip Paeps 
1335e948693eSPhilip Paeps static int
1336e948693eSPhilip Paeps sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
1337e948693eSPhilip Paeps     enum sfxge_txq_type type, unsigned int evq_index)
1338e948693eSPhilip Paeps {
1339bc85c897SGeorge V. Neville-Neil 	char name[16];
1340bc85c897SGeorge V. Neville-Neil 	struct sysctl_oid *txq_node;
1341e948693eSPhilip Paeps 	struct sfxge_txq *txq;
1342e948693eSPhilip Paeps 	struct sfxge_evq *evq;
1343e948693eSPhilip Paeps #ifdef SFXGE_HAVE_MQ
1344e948693eSPhilip Paeps 	struct sfxge_tx_dpl *stdp;
1345e948693eSPhilip Paeps #endif
1346e948693eSPhilip Paeps 	efsys_mem_t *esmp;
1347e948693eSPhilip Paeps 	unsigned int nmaps;
1348e948693eSPhilip Paeps 	int rc;
1349e948693eSPhilip Paeps 
1350e948693eSPhilip Paeps 	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
1351e948693eSPhilip Paeps 	txq->sc = sc;
1352385b1d8eSGeorge V. Neville-Neil 	txq->entries = sc->txq_entries;
1353385b1d8eSGeorge V. Neville-Neil 	txq->ptr_mask = txq->entries - 1;
1354e948693eSPhilip Paeps 
1355e948693eSPhilip Paeps 	sc->txq[txq_index] = txq;
1356e948693eSPhilip Paeps 	esmp = &txq->mem;
1357e948693eSPhilip Paeps 
1358e948693eSPhilip Paeps 	evq = sc->evq[evq_index];
1359e948693eSPhilip Paeps 
1360e948693eSPhilip Paeps 	/* Allocate and zero DMA space for the descriptor ring. */
1361385b1d8eSGeorge V. Neville-Neil 	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
1362e948693eSPhilip Paeps 		return (rc);
1363385b1d8eSGeorge V. Neville-Neil 	(void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries));
1364e948693eSPhilip Paeps 
1365e948693eSPhilip Paeps 	/* Allocate buffer table entries. */
1366385b1d8eSGeorge V. Neville-Neil 	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
1367e948693eSPhilip Paeps 				 &txq->buf_base_id);
1368e948693eSPhilip Paeps 
1369e948693eSPhilip Paeps 	/* Create a DMA tag for packet mappings. */
1370fb8ccc78SMarius Strobl 	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
1371fb8ccc78SMarius Strobl 	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
1372fb8ccc78SMarius Strobl 	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
1373e948693eSPhilip Paeps 	    &txq->packet_dma_tag) != 0) {
1374e948693eSPhilip Paeps 		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
1375e948693eSPhilip Paeps 		rc = ENOMEM;
1376e948693eSPhilip Paeps 		goto fail;
1377e948693eSPhilip Paeps 	}
1378e948693eSPhilip Paeps 
1379e948693eSPhilip Paeps 	/* Allocate pending descriptor array for batching writes. */
1380385b1d8eSGeorge V. Neville-Neil 	txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
1381e948693eSPhilip Paeps 				M_SFXGE, M_ZERO | M_WAITOK);
1382e948693eSPhilip Paeps 
1383e948693eSPhilip Paeps 	/* Allocate and initialize mbuf DMA mapping array. */
1384385b1d8eSGeorge V. Neville-Neil 	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
1385e948693eSPhilip Paeps 	    M_SFXGE, M_ZERO | M_WAITOK);
1386385b1d8eSGeorge V. Neville-Neil 	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
1387e948693eSPhilip Paeps 		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
1388e948693eSPhilip Paeps 				       &txq->stmp[nmaps].map);
1389e948693eSPhilip Paeps 		if (rc != 0)
1390e948693eSPhilip Paeps 			goto fail2;
1391e948693eSPhilip Paeps 	}
1392e948693eSPhilip Paeps 
1393bc85c897SGeorge V. Neville-Neil 	snprintf(name, sizeof(name), "%u", txq_index);
1394bc85c897SGeorge V. Neville-Neil 	txq_node = SYSCTL_ADD_NODE(
1395bc85c897SGeorge V. Neville-Neil 		device_get_sysctl_ctx(sc->dev),
1396bc85c897SGeorge V. Neville-Neil 		SYSCTL_CHILDREN(sc->txqs_node),
1397bc85c897SGeorge V. Neville-Neil 		OID_AUTO, name, CTLFLAG_RD, NULL, "");
1398bc85c897SGeorge V. Neville-Neil 	if (txq_node == NULL) {
1399bc85c897SGeorge V. Neville-Neil 		rc = ENOMEM;
1400bc85c897SGeorge V. Neville-Neil 		goto fail_txq_node;
1401bc85c897SGeorge V. Neville-Neil 	}
1402bc85c897SGeorge V. Neville-Neil 
1403e948693eSPhilip Paeps 	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
1404e948693eSPhilip Paeps 	    (rc = tso_init(txq)) != 0)
1405e948693eSPhilip Paeps 		goto fail3;
1406e948693eSPhilip Paeps 
1407e948693eSPhilip Paeps #ifdef SFXGE_HAVE_MQ
1408060a95efSGeorge V. Neville-Neil 	if (sfxge_tx_dpl_get_max <= 0) {
1409060a95efSGeorge V. Neville-Neil 		log(LOG_ERR, "%s=%d must be greater than 0",
1410060a95efSGeorge V. Neville-Neil 		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
1411060a95efSGeorge V. Neville-Neil 		rc = EINVAL;
1412060a95efSGeorge V. Neville-Neil 		goto fail_tx_dpl_get_max;
1413060a95efSGeorge V. Neville-Neil 	}
1414060a95efSGeorge V. Neville-Neil 	if (sfxge_tx_dpl_put_max < 0) {
1415060a95efSGeorge V. Neville-Neil 		log(LOG_ERR, "%s=%d must be greater than or equal to 0",
1416060a95efSGeorge V. Neville-Neil 		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
1417060a95efSGeorge V. Neville-Neil 		rc = EINVAL;
1418060a95efSGeorge V. Neville-Neil 		goto fail_tx_dpl_put_max;
1419060a95efSGeorge V. Neville-Neil 	}
1420060a95efSGeorge V. Neville-Neil 
1421e948693eSPhilip Paeps 	/* Initialize the deferred packet list. */
1422e948693eSPhilip Paeps 	stdp = &txq->dpl;
1423060a95efSGeorge V. Neville-Neil 	stdp->std_put_max = sfxge_tx_dpl_put_max;
1424060a95efSGeorge V. Neville-Neil 	stdp->std_get_max = sfxge_tx_dpl_get_max;
1425e948693eSPhilip Paeps 	stdp->std_getp = &stdp->std_get;
1426e948693eSPhilip Paeps 
1427e948693eSPhilip Paeps 	mtx_init(&txq->lock, "txq", NULL, MTX_DEF);
1428bc85c897SGeorge V. Neville-Neil 
1429bc85c897SGeorge V. Neville-Neil 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
1430bc85c897SGeorge V. Neville-Neil 			SYSCTL_CHILDREN(txq_node), OID_AUTO,
1431bc85c897SGeorge V. Neville-Neil 			"dpl_get_count", CTLFLAG_RD | CTLFLAG_STATS,
1432bc85c897SGeorge V. Neville-Neil 			&stdp->std_get_count, 0, "");
1433e948693eSPhilip Paeps #endif
1434e948693eSPhilip Paeps 
1435e948693eSPhilip Paeps 	txq->type = type;
1436e948693eSPhilip Paeps 	txq->evq_index = evq_index;
1437e948693eSPhilip Paeps 	txq->txq_index = txq_index;
1438e948693eSPhilip Paeps 	txq->init_state = SFXGE_TXQ_INITIALIZED;
1439e948693eSPhilip Paeps 
1440e948693eSPhilip Paeps 	return (0);
1441e948693eSPhilip Paeps 
1442060a95efSGeorge V. Neville-Neil fail_tx_dpl_put_max:
1443060a95efSGeorge V. Neville-Neil fail_tx_dpl_get_max:
1444e948693eSPhilip Paeps fail3:
1445bc85c897SGeorge V. Neville-Neil fail_txq_node:
1446e948693eSPhilip Paeps 	free(txq->pend_desc, M_SFXGE);
1447e948693eSPhilip Paeps fail2:
1448b7b0edd1SGeorge V. Neville-Neil 	while (nmaps-- != 0)
1449e948693eSPhilip Paeps 		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
1450e948693eSPhilip Paeps 	free(txq->stmp, M_SFXGE);
1451e948693eSPhilip Paeps 	bus_dma_tag_destroy(txq->packet_dma_tag);
1452e948693eSPhilip Paeps 
1453e948693eSPhilip Paeps fail:
1454e948693eSPhilip Paeps 	sfxge_dma_free(esmp);
1455e948693eSPhilip Paeps 
1456e948693eSPhilip Paeps 	return (rc);
1457e948693eSPhilip Paeps }
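
/*
 * Illustrative reading of the positional bus_dma_tag_create() arguments
 * above (easy to misread in a 13-argument call):
 *
 *	alignment 1, boundary 0x1000	segments must not cross 4 KB
 *	lowaddr 0x3FFFFFFFFFFF		46-bit DMA address limit,
 *					presumably the controller's reach
 *	maxsize 0x11000			68 KB: a full 64 KB TSO payload
 *					plus headroom for protocol headers
 *	nsegments SFXGE_TX_MAPPING_MAX_SEG
 *	maxsegsz 0x1000			at most 4 KB per segment
 */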
1458e948693eSPhilip Paeps 
1459e948693eSPhilip Paeps static const struct {
1460e948693eSPhilip Paeps 	const char *name;
1461e948693eSPhilip Paeps 	size_t offset;
1462e948693eSPhilip Paeps } sfxge_tx_stats[] = {
1463e948693eSPhilip Paeps #define	SFXGE_TX_STAT(name, member) \
1464e948693eSPhilip Paeps 	{ #name, offsetof(struct sfxge_txq, member) }
1465e948693eSPhilip Paeps 	SFXGE_TX_STAT(tso_bursts, tso_bursts),
1466e948693eSPhilip Paeps 	SFXGE_TX_STAT(tso_packets, tso_packets),
1467e948693eSPhilip Paeps 	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
1468e948693eSPhilip Paeps 	SFXGE_TX_STAT(tx_collapses, collapses),
1469e948693eSPhilip Paeps 	SFXGE_TX_STAT(tx_drops, drops),
1470d7ac87d3SGleb Smirnoff 	SFXGE_TX_STAT(tx_early_drops, early_drops),
1471e948693eSPhilip Paeps };
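
/*
 * Example expansion of SFXGE_TX_STAT() above:
 *
 *	SFXGE_TX_STAT(tx_collapses, collapses)
 * becomes
 *	{ "tx_collapses", offsetof(struct sfxge_txq, collapses) }
 *
 * pairing each sysctl name with the byte offset of its counter within
 * struct sfxge_txq.
 */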
1472e948693eSPhilip Paeps 
1473e948693eSPhilip Paeps static int
1474e948693eSPhilip Paeps sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
1475e948693eSPhilip Paeps {
1476e948693eSPhilip Paeps 	struct sfxge_softc *sc = arg1;
1477e948693eSPhilip Paeps 	unsigned int id = arg2;
1478e948693eSPhilip Paeps 	unsigned long sum;
1479e948693eSPhilip Paeps 	unsigned int index;
1480e948693eSPhilip Paeps 
1481e948693eSPhilip Paeps 	/* Sum across all TX queues */
1482e948693eSPhilip Paeps 	sum = 0;
1483e948693eSPhilip Paeps 	for (index = 0;
1484e948693eSPhilip Paeps 	     index < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc);
1485e948693eSPhilip Paeps 	     index++)
1486e948693eSPhilip Paeps 		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
1487e948693eSPhilip Paeps 					  sfxge_tx_stats[id].offset);
1488e948693eSPhilip Paeps 
1489b7b0edd1SGeorge V. Neville-Neil 	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
1490e948693eSPhilip Paeps }
1491e948693eSPhilip Paeps 
1492e948693eSPhilip Paeps static void
1493e948693eSPhilip Paeps sfxge_tx_stat_init(struct sfxge_softc *sc)
1494e948693eSPhilip Paeps {
1495e948693eSPhilip Paeps 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
1496e948693eSPhilip Paeps 	struct sysctl_oid_list *stat_list;
1497e948693eSPhilip Paeps 	unsigned int id;
1498e948693eSPhilip Paeps 
1499e948693eSPhilip Paeps 	stat_list = SYSCTL_CHILDREN(sc->stats_node);
1500e948693eSPhilip Paeps 
1501e948693eSPhilip Paeps 	for (id = 0;
1502e948693eSPhilip Paeps 	     id < sizeof(sfxge_tx_stats) / sizeof(sfxge_tx_stats[0]);
1503e948693eSPhilip Paeps 	     id++) {
1504e948693eSPhilip Paeps 		SYSCTL_ADD_PROC(
1505e948693eSPhilip Paeps 			ctx, stat_list,
1506e948693eSPhilip Paeps 			OID_AUTO, sfxge_tx_stats[id].name,
1507e948693eSPhilip Paeps 			CTLTYPE_ULONG|CTLFLAG_RD,
1508e948693eSPhilip Paeps 			sc, id, sfxge_tx_stat_handler, "LU",
1509e948693eSPhilip Paeps 			"");
1510e948693eSPhilip Paeps 	}
1511e948693eSPhilip Paeps }
1512e948693eSPhilip Paeps 
1513e948693eSPhilip Paeps void
1514e948693eSPhilip Paeps sfxge_tx_fini(struct sfxge_softc *sc)
1515e948693eSPhilip Paeps {
1516e948693eSPhilip Paeps 	int index;
1517e948693eSPhilip Paeps 
1518e948693eSPhilip Paeps 	index = SFXGE_TX_SCALE(sc);
1519e948693eSPhilip Paeps 	while (--index >= 0)
1520e948693eSPhilip Paeps 		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
1521e948693eSPhilip Paeps 
1522e948693eSPhilip Paeps 	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
1523e948693eSPhilip Paeps 	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
1524e948693eSPhilip Paeps }
1525e948693eSPhilip Paeps 
1527e948693eSPhilip Paeps int
1528e948693eSPhilip Paeps sfxge_tx_init(struct sfxge_softc *sc)
1529e948693eSPhilip Paeps {
1530e948693eSPhilip Paeps 	struct sfxge_intr *intr;
1531e948693eSPhilip Paeps 	int index;
1532e948693eSPhilip Paeps 	int rc;
1533e948693eSPhilip Paeps 
1534e948693eSPhilip Paeps 	intr = &sc->intr;
1535e948693eSPhilip Paeps 
1536e948693eSPhilip Paeps 	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
1537e948693eSPhilip Paeps 	    ("intr->state != SFXGE_INTR_INITIALIZED"));
1538e948693eSPhilip Paeps 
1539bc85c897SGeorge V. Neville-Neil 	sc->txqs_node = SYSCTL_ADD_NODE(
1540bc85c897SGeorge V. Neville-Neil 		device_get_sysctl_ctx(sc->dev),
1541bc85c897SGeorge V. Neville-Neil 		SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
1542bc85c897SGeorge V. Neville-Neil 		OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
1543bc85c897SGeorge V. Neville-Neil 	if (sc->txqs_node == NULL) {
1544bc85c897SGeorge V. Neville-Neil 		rc = ENOMEM;
1545bc85c897SGeorge V. Neville-Neil 		goto fail_txq_node;
1546bc85c897SGeorge V. Neville-Neil 	}
1547bc85c897SGeorge V. Neville-Neil 
1548e948693eSPhilip Paeps 	/* Initialize the transmit queues */
1549e948693eSPhilip Paeps 	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
1550e948693eSPhilip Paeps 	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)
1551e948693eSPhilip Paeps 		goto fail;
1552e948693eSPhilip Paeps 
1553e948693eSPhilip Paeps 	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
1554e948693eSPhilip Paeps 	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)
1555e948693eSPhilip Paeps 		goto fail2;
1556e948693eSPhilip Paeps 
1557e948693eSPhilip Paeps 	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
1558e948693eSPhilip Paeps 		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index,
1559e948693eSPhilip Paeps 		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
1560e948693eSPhilip Paeps 			goto fail3;
1561e948693eSPhilip Paeps 	}
1562e948693eSPhilip Paeps 
1563e948693eSPhilip Paeps 	sfxge_tx_stat_init(sc);
1564e948693eSPhilip Paeps 
1565e948693eSPhilip Paeps 	return (0);
1566e948693eSPhilip Paeps 
1567e948693eSPhilip Paeps fail3:
1568e948693eSPhilip Paeps 	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
1569e948693eSPhilip Paeps 
1570e948693eSPhilip Paeps 	while (--index >= 0)
1571e948693eSPhilip Paeps 		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
1572e948693eSPhilip Paeps 
1573e948693eSPhilip Paeps fail2:
1574e948693eSPhilip Paeps 	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
1575e948693eSPhilip Paeps 
1576e948693eSPhilip Paeps fail:
1577bc85c897SGeorge V. Neville-Neil fail_txq_node:
1578e948693eSPhilip Paeps 	return (rc);
1579e948693eSPhilip Paeps }
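
/*
 * Lifecycle summary: sfxge_tx_init()/sfxge_tx_fini() allocate and free
 * the per-queue software state, while sfxge_tx_start()/sfxge_tx_stop()
 * create and destroy the common-code queues against the hardware.
 * init/fini bracket start/stop, and each failure path unwinds only what
 * its own stage completed.
 */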
1580