/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"
#include <machine/atomic.h>

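/*
 * Completion event moderation: only every "cev_factor"-th call requests
 * a completion queue entry (CQE), which reduces the amount of TX
 * completion processing.
 */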
static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}

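/*
 * Post a NOP work request covering "ds_cnt" data segments. The transmit
 * path uses this to pad the send queue up to the ring edge so that the
 * next WQE does not wrap around.
 */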
void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif

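/*
 * Look up the send queue bound to the mbuf's send tag. Returns NULL if
 * the tag belongs to another interface or if the selected send queue is
 * not running.
 */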
static struct mlx5e_sq *
mlx5e_select_queue_by_send_tag(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_snd_tag *ptag;
	struct mlx5e_sq *sq;

	/* check for route change */
	if (mb->m_pkthdr.snd_tag->ifp != ifp)
		return (NULL);

	/* get pointer to sendqueue */
	ptag = container_of(mb->m_pkthdr.snd_tag,
	    struct mlx5e_snd_tag, m_snd_tag);

	switch (ptag->type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		sq = container_of(ptag,
		    struct mlx5e_rl_channel, tag)->sq;
		break;
#endif
	case IF_SND_TAG_TYPE_UNLIMITED:
		sq = &container_of(ptag,
		    struct mlx5e_channel, tag)->sq[0];
		KASSERT(({
		    struct mlx5e_priv *priv = ifp->if_softc;
		    priv->channel_refs > 0; }),
		    ("mlx5e_select_queue: Channel refs are zero for unlimited tag"));
		break;
	default:
		sq = NULL;
		break;
	}

	/* check if valid */
	if (sq != NULL && READ_ONCE(sq->running) != 0)
		return (sq);

	return (NULL);
}

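/*
 * Select a send queue based on the mbuf's flow ID and the VLAN priority
 * bits. Returns NULL if the selected send queue is not running.
 */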
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_sq *sq;
	u32 ch;
	u32 tc;

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if send queue is running */
	sq = &priv->channel[ch].sq[tc];
	if (likely(READ_ONCE(sq->running) != 0))
		return (sq);
	return (NULL);
}

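/*
 * Compute the number of header bytes to inline when only the L2 header
 * is required. For IPv4 and IPv6 packets a few extra bytes are included
 * so that the TOS or traffic class field is part of the inline data.
 */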
static inline u16
mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	uint16_t eth_type;
	int min_inline;

	eh = mtod(mb, struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
		goto max_inline;
	} else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			goto max_inline;
		eth_type = ntohs(eh->evl_proto);
		min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		min_inline = ETHER_HDR_LEN;
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		/*
		 * Make sure the TOS(IPv4) or traffic class(IPv6)
		 * field gets inlined. Else the SQ may stall.
		 */
		min_inline += 4;
		break;
	default:
		goto max_inline;
	}

	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < min_inline)
		goto max_inline;
	return (min_inline);

max_inline:
	return (MIN(mb->m_pkthdr.len, sq->max_inline));
}

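/*
 * Compute the number of bytes covering the Ethernet, IP and TCP headers,
 * or the Ethernet, IP and 8-byte UDP header for UDP packets. Returns 0
 * if the headers cannot be parsed from the first mbuf or the protocol is
 * not supported.
 */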
static int
mlx5e_get_full_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
			return (0);
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen;
			break;
		case IPPROTO_UDP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen + 8;
			goto done;
		default:
			return (0);
		}
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		switch (ip6->ip6_nxt) {
		case IPPROTO_TCP:
			eth_hdr_len += sizeof(*ip6);
			break;
		case IPPROTO_UDP:
			eth_hdr_len += sizeof(*ip6) + 8;
			goto done;
		default:
			return (0);
		}
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
done:
	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

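/*
 * Transmit a single mbuf: build the work queue entry, inline headers
 * according to the minimum inline mode, map the remaining mbuf data for
 * DMA and record the doorbell data. The caller writes the doorbell.
 */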
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/* Return ENOBUFS if the queue is full */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS)))
		return (ENOBUFS);

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
			return (ENOMEM);
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

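	/* Select checksum offload flags for the work queue entry */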
	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_full_header_size(mb);
		if (unlikely(ihs == 0)) {
			err = EINVAL;
			goto tx_drop;
		}
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;

		switch (sq->min_inline_mode) {
		case MLX5_INLINE_MODE_IP:
		case MLX5_INLINE_MODE_TCP_UDP:
			ihs = mlx5e_get_full_header_size(mb);
			if (unlikely(ihs == 0))
				ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_L2:
			ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_NONE:
			/* FALLTHROUGH */
		default:
			if ((mb->m_flags & M_VLANTAG) != 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
				/* inlining VLAN data is not required */
				wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
				wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
				ihs = 0;
			} else if ((mb->m_flags & M_VLANTAG) == 0 &&
				   (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
				/* inlining non-VLAN data is not required */
				ihs = 0;
			} else {
				/* we are forced to inline the L2 header, if any */
				ihs = mlx5e_get_l2_header_size(sq, mb);
			}
			break;
		}
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}

	if (likely(ihs == 0)) {
		/* nothing to inline */
	} else if (unlikely(ihs > sq->max_inline)) {
		/* inline header size is too big */
		err = EINVAL;
		goto tx_drop;
	} else if ((mb->m_flags & M_VLANTAG) != 0) {
		struct ether_vlan_header *eh = (struct ether_vlan_header *)
		    wqe->eth.inline_hdr_start;

		/* Range checks */
		if (unlikely(ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN)))
			ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
		else if (unlikely(ihs < ETHER_HDR_LEN)) {
			err = EINVAL;
			goto tx_drop;
		}
		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert a 4-byte VLAN tag into the data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
		/* Copy rest of header data, if any */
		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, ihs - ETHER_HDR_LEN);
		/* Extend header by 4 bytes */
		ihs += ETHER_VLAN_ENCAP_LEN;
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	} else {
		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
		m_adj(mb, ihs);
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}

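	/*
	 * Compute the number of 16-byte data segments consumed by the
	 * control and eth segments plus any inlined header bytes; the
	 * DMA data segments are appended after them.
	 */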
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Count all traffic going out */
	sq->stats.packets++;
	sq->stats.bytes += sq->mbuf[pi].num_bytes;

	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

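/*
 * Process pending TX completions: for every CQE, free up to "cev_factor"
 * transmitted mbufs and advance the send queue consumer counter.
 */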
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;	/* Safety clear */

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;
}

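/*
 * Transmit one mbuf with the send queue lock held and write the
 * doorbell, if needed. The mbuf is always consumed.
 */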
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    READ_ONCE(sq->running) == 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

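/*
 * Network stack transmit entry point: select a send queue, preferring
 * the mbuf's send tag if present, and transmit under the queue lock.
 */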
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	if (mb->m_pkthdr.snd_tag != NULL) {
		sq = mlx5e_select_queue_by_send_tag(ifp, mb);
		if (unlikely(sq == NULL)) {
			/* Check for route change */
			if (mb->m_pkthdr.snd_tag->ifp != ifp) {
				/* Free mbuf */
				m_freem(mb);

				/*
				 * Tell upper layers about route
				 * change and to re-transmit this
				 * packet:
				 */
				return (EAGAIN);
			}
			goto select_queue;
		}
	} else {
select_queue:
		sq = mlx5e_select_queue(ifp, mb);
		if (unlikely(sq == NULL)) {
			/* Free mbuf */
			m_freem(mb);

			/* Invalid send queue */
			return (ENXIO);
		}
	}

	mtx_lock(&sq->lock);
	ret = mlx5e_xmit_locked(ifp, sq, mb);
	mtx_unlock(&sq->lock);

	return (ret);
}

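/*
 * TX completion queue interrupt handler: poll completions and re-arm
 * the completion queue.
 */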
void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}