xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c (revision 2f513db7)
/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"
#include <machine/in_cksum.h>

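/*
 * Allocate and DMA-map an mbuf chain for the receive WQE at index
 * "ix" and fill in the WQE's scatter/gather entries.  Unused scatter
 * entries are zeroed so the hardware ignores them.  Returns 0 on
 * success or a negative errno on failure.
 */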
static inline int
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
    struct mlx5e_rx_wqe *wqe, u16 ix)
{
	bus_dma_segment_t segs[MLX5E_MAX_BUSDMA_RX_SEGS];
	struct mbuf *mb;
	int nsegs;
	int err;
	struct mbuf *mb_head;
	int i;

	if (rq->mbuf[ix].mbuf != NULL)
		return (0);

	mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    MLX5E_MAX_RX_BYTES);
	if (unlikely(mb == NULL))
		return (-ENOMEM);

	mb->m_len = MLX5E_MAX_RX_BYTES;
	mb->m_pkthdr.len = MLX5E_MAX_RX_BYTES;

	for (i = 1; i < rq->nsegs; i++) {
		if (mb_head->m_pkthdr.len >= rq->wqe_sz)
			break;
		mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0,
		    MLX5E_MAX_RX_BYTES);
		if (unlikely(mb == NULL)) {
			m_freem(mb_head);
			return (-ENOMEM);
		}
		mb->m_len = MLX5E_MAX_RX_BYTES;
		mb_head->m_pkthdr.len += MLX5E_MAX_RX_BYTES;
	}
	/* rewind to first mbuf in chain */
	mb = mb_head;

	/* get IP header aligned */
	m_adj(mb, MLX5E_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0)
		goto err_free_mbuf;
	if (unlikely(nsegs == 0)) {
		bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
		err = -ENOMEM;
		goto err_free_mbuf;
	}
	wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
	wqe->data[0].byte_count = cpu_to_be32(segs[0].ds_len |
	    MLX5_HW_START_PADDING);
	for (i = 1; i != nsegs; i++) {
		wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
		wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
	}
	for (; i < rq->nsegs; i++) {
		wqe->data[i].addr = 0;
		wqe->data[i].byte_count = 0;
	}

	rq->mbuf[ix].mbuf = mb;
	rq->mbuf[ix].data = mb->m_data;

	bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map,
	    BUS_DMASYNC_PREREAD);
	return (0);

err_free_mbuf:
	m_freem(mb);
	return (err);
}

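/*
 * Refill the receive work queue with freshly allocated WQEs until it
 * is full.  If an allocation fails, arm a watchdog callout to retry
 * on the next tick, then make the new WQEs visible to the device by
 * updating the doorbell record.
 */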
static void
mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	if (unlikely(rq->enabled == 0))
		return;

	while (!mlx5_wq_ll_is_full(&rq->wq)) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);

		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
			callout_reset_curcpu(&rq->watchdog, 1, (void *)&mlx5e_post_rx_wqes, rq);
			break;
		}
		mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
	}

	/* ensure wqes are visible to device before updating doorbell record */
	atomic_thread_fence_rel();

	mlx5_wq_ll_update_db_record(&rq->wq);
}

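/*
 * Hardware LRO merges several TCP segments into a single mbuf chain
 * while leaving the headers of the first segment in place.  Rewrite
 * the IP and TCP headers (total length, TTL/hop limit, ACK number,
 * window, PSH flag, timestamps and the IPv4 header checksum) so the
 * aggregated packet is consistent when handed to the network stack.
 */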
static void
mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
	/* TODO: consider vlans, ip options, ... */
	struct ether_header *eh;
	uint16_t eh_type;
	uint16_t tot_len;
	struct ip6_hdr *ip6 = NULL;
	struct ip *ip4 = NULL;
	struct tcphdr *th;
	uint32_t *ts_ptr;
	uint8_t l4_hdr_type;
	int tcp_ack;

	eh = mtod(mb, struct ether_header *);
	eh_type = ntohs(eh->ether_type);

	l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
	    (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

	/* TODO: consider vlan */
	tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;

	switch (eh_type) {
	case ETHERTYPE_IP:
		ip4 = (struct ip *)(eh + 1);
		th = (struct tcphdr *)(ip4 + 1);
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(eh + 1);
		th = (struct tcphdr *)(ip6 + 1);
		break;
	default:
		return;
	}

	ts_ptr = (uint32_t *)(th + 1);

	if (get_cqe_lro_tcppsh(cqe))
		th->th_flags |= TH_PUSH;

	if (tcp_ack) {
		th->th_flags |= TH_ACK;
		th->th_ack = cqe->lro_ack_seq_num;
		th->th_win = cqe->lro_tcp_win;

		/*
		 * FreeBSD handles only a 32-bit aligned timestamp
		 * option placed right after the TCP header:
		 * +--------+--------+--------+--------+
		 * |   NOP  |  NOP   |  TSopt |   10   |
		 * +--------+--------+--------+--------+
		 * |          TSval   timestamp        |
		 * +--------+--------+--------+--------+
		 * |          TSecr   timestamp        |
		 * +--------+--------+--------+--------+
		 */
		if (get_cqe_lro_timestamp_valid(cqe) &&
		    __predict_true(*ts_ptr == ntohl(TCPOPT_NOP << 24 |
		    TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
		    TCPOLEN_TIMESTAMP))) {
			/*
			 * cqe->timestamp is 64 bits long.
			 * [0-31] - timestamp.
			 * [32-63] - timestamp echo reply.
			 */
			ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
			ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
		}
	}
	if (ip4) {
		ip4->ip_ttl = cqe->lro_min_ttl;
		ip4->ip_len = cpu_to_be16(tot_len);
		ip4->ip_sum = 0;
		ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
	} else {
		ip6->ip6_hlim = cqe->lro_min_ttl;
		ip6->ip6_plen = cpu_to_be16(tot_len -
		    sizeof(struct ip6_hdr));
	}
	/* TODO: handle tcp checksum */
}

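/*
 * Convert a raw hardware timestamp into the host clock domain by
 * linear interpolation between the two most recent calibration
 * points.  The generation counter is re-read to obtain a consistent
 * snapshot of the calibration data, seqlock-style.  In effect this
 * computes, in fixed point with MLX5E_TSTMP_PREC bits of headroom
 * against overflow:
 *
 *   res = base_prev + (hw_tstmp - hw_prev) *
 *       (base_curr - base_prev) / (hw_curr - hw_prev)
 */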
static uint64_t
mlx5e_mbuf_tstmp(struct mlx5e_priv *priv, uint64_t hw_tstmp)
{
	struct mlx5e_clbr_point *cp, dcp;
	uint64_t a1, a2, res;
	u_int gen;

	do {
		cp = &priv->clbr_points[priv->clbr_curr];
		gen = atomic_load_acq_int(&cp->clbr_gen);
		if (gen == 0)
			return (0);
		dcp = *cp;
		atomic_thread_fence_acq();
	} while (gen != cp->clbr_gen);

	a1 = (hw_tstmp - dcp.clbr_hw_prev) >> MLX5E_TSTMP_PREC;
	a2 = (dcp.base_curr - dcp.base_prev) >> MLX5E_TSTMP_PREC;
	res = (a1 * a2) << MLX5E_TSTMP_PREC;

	/*
	 * The divisor cannot be zero because the calibration callback
	 * checks for that condition and disables timestamping if the
	 * clock is halted.
	 */
	res /= (dcp.clbr_hw_curr - dcp.clbr_hw_prev) >> MLX5E_TSTMP_PREC;

	res += dcp.base_prev;
	return (res);
}

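/*
 * Fill in the mbuf packet header for a completed receive: fix up LRO
 * headers if the CQE describes an aggregated session, trim the mbuf
 * chain to the actual byte count, and set the flowid/RSS hash type,
 * checksum flags, VLAN tag and hardware receive timestamp.
 */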
static inline void
mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
    struct mlx5e_rq *rq, struct mbuf *mb,
    u32 cqe_bcnt)
{
	struct ifnet *ifp = rq->ifp;
	struct mlx5e_channel *c;
	struct mbuf *mb_head;
	int lro_num_seg;	/* HW LRO session aggregated packets counter */
	uint64_t tstmp;

	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(mb, cqe);
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

	mb->m_pkthdr.len = cqe_bcnt;
	for (mb_head = mb; mb != NULL; mb = mb->m_next) {
		if (mb->m_len > cqe_bcnt)
			mb->m_len = cqe_bcnt;
		cqe_bcnt -= mb->m_len;
		if (likely(cqe_bcnt == 0)) {
			if (likely(mb->m_next != NULL)) {
				/* trim off empty mbufs */
				m_freem(mb->m_next);
				mb->m_next = NULL;
			}
			break;
		}
	}
	/* rewind to first mbuf in chain */
	mb = mb_head;

	/* check if a Toeplitz hash was computed */
	if (cqe->rss_hash_type != 0) {
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
#ifdef RSS
		/* decode the RSS hash type */
		switch (cqe->rss_hash_type &
		    (CQE_RSS_DST_HTYPE_L4 | CQE_RSS_DST_HTYPE_IP)) {
		/* IPv4 */
		case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV4):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4);
			break;
		case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV4):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4);
			break;
		case CQE_RSS_DST_HTYPE_IPV4:
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4);
			break;
		/* IPv6 */
		case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV6):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6);
			break;
		case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV6):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6);
			break;
		case CQE_RSS_DST_HTYPE_IPV6:
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6);
			break;
		default:	/* Other */
			M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
			break;
		}
#else
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
#endif
	} else {
		mb->m_pkthdr.flowid = rq->ix;
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
	}
	mb->m_pkthdr.rcvif = ifp;

	if (likely(ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
	    ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
	    (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK))) {
		mb->m_pkthdr.csum_flags =
		    CSUM_IP_CHECKED | CSUM_IP_VALID |
		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mb->m_pkthdr.csum_data = htons(0xffff);
	} else {
		rq->stats.csum_none++;
	}

	if (cqe_has_vlan(cqe)) {
		mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
		mb->m_flags |= M_VLANTAG;
	}

	c = container_of(rq, struct mlx5e_channel, rq);
	if (c->priv->clbr_done >= 2) {
		tstmp = mlx5e_mbuf_tstmp(c->priv, be64_to_cpu(cqe->timestamp));
		if ((tstmp & MLX5_CQE_TSTMP_PTP) != 0) {
			/*
			 * The timestamp was taken at packet arrival
			 * rather than at CQE generation.
			 */
			tstmp &= ~MLX5_CQE_TSTMP_PTP;
			mb->m_flags |= M_TSTMP_HPREC;
		}
		mb->m_pkthdr.rcv_tstmp = tstmp;
		mb->m_flags |= M_TSTMP;
	}
}

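/*
 * Copy a 64-byte CQE between the completion queue ring and a local
 * buffer.  The consumer counter "cc" is masked by the queue size, so
 * callers may pass an unwrapped index.
 */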
static inline void
mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, (cc & cq->wq.sz_m1)),
	    sizeof(struct mlx5_cqe64));
}

static inline void
mlx5e_write_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	memcpy(mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq.sz_m1),
	    data, sizeof(struct mlx5_cqe64));
}

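/*
 * Expand a single mini CQE into the shared title CQE: the byte count,
 * WQE counter and RSS hash come from the mini CQE, while the
 * ownership bit in op_own is recomputed for the slot the expanded
 * CQE will occupy.
 */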
static inline void
mlx5e_decompress_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *title,
    struct mlx5_mini_cqe8 *mini,
    u16 wqe_counter, int i)
{
	/*
	 * NOTE: The fields which are not set here are copied from the
	 * initial and common title. See memcpy() in
	 * mlx5e_write_cqe_slot().
	 */
	title->byte_cnt = mini->byte_cnt;
	title->wqe_counter = cpu_to_be16((wqe_counter + i) & cq->wq.sz_m1);
	title->rss_hash_result = mini->rx_hash_result;
	/*
	 * Since we use MLX5_CQE_FORMAT_HASH when creating the RX CQ,
	 * the value of the checksum should be ignored.
	 */
	title->check_sum = 0;
	title->op_own = (title->op_own & 0xf0) |
	    (((cq->wq.cc + i) >> cq->wq.log_sz) & 1);
}

#define MLX5E_MINI_ARRAY_SZ 8
/* Make sure structs are not packed differently */
CTASSERT(sizeof(struct mlx5_cqe64) ==
    sizeof(struct mlx5_mini_cqe8) * MLX5E_MINI_ARRAY_SZ);
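/*
 * Expand a compressed CQE session in place.  The title CQE's byte
 * count holds the number of mini CQEs in the session; they are read
 * from the ring in blocks of MLX5E_MINI_ARRAY_SZ and written back as
 * full-size CQEs so the regular polling loop can consume them.
 */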
static void
mlx5e_decompress_cqes(struct mlx5e_cq *cq)
{
	struct mlx5_mini_cqe8 mini_array[MLX5E_MINI_ARRAY_SZ];
	struct mlx5_cqe64 title;
	u32 cqe_count;
	u32 i = 0;
	u16 title_wqe_counter;

	mlx5e_read_cqe_slot(cq, cq->wq.cc, &title);
	title_wqe_counter = be16_to_cpu(title.wqe_counter);
	cqe_count = be32_to_cpu(title.byte_cnt);

	/* Make sure we won't overflow */
	KASSERT(cqe_count <= cq->wq.sz_m1,
	    ("%s: cqe_count %u > cq->wq.sz_m1 %u", __func__,
	    cqe_count, cq->wq.sz_m1));

	mlx5e_read_cqe_slot(cq, cq->wq.cc + 1, mini_array);
	while (true) {
		mlx5e_decompress_cqe(cq, &title,
		    &mini_array[i % MLX5E_MINI_ARRAY_SZ],
		    title_wqe_counter, i);
		mlx5e_write_cqe_slot(cq, cq->wq.cc + i, &title);
		i++;

		if (i == cqe_count)
			break;
		if (i % MLX5E_MINI_ARRAY_SZ == 0)
			mlx5e_read_cqe_slot(cq, cq->wq.cc + i, mini_array);
	}
}

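/*
 * Poll up to "budget" completions from the receive CQ.  For each
 * completed WQE: sync the DMA map, run any pfil hooks on the raw
 * buffer, then either copy a small frame into a fresh header mbuf
 * (recycling the receive buffer) or detach the mbuf chain from the
 * ring, build the packet header metadata and hand the mbuf to LRO or
 * directly to if_input().  Returns the number of CQEs processed.
 */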
static int
mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
{
	struct pfil_head *pfil;
	int i, rv;

	CURVNET_SET_QUIET(rq->ifp->if_vnet);
	pfil = rq->channel->priv->pfil;
	for (i = 0; i < budget; i++) {
		struct mlx5e_rx_wqe *wqe;
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		__be16 wqe_counter_be;
		u16 wqe_counter;
		u32 byte_cnt, seglen;

		cqe = mlx5e_get_cqe(&rq->cq);
		if (!cqe)
			break;

		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
			mlx5e_decompress_cqes(&rq->cq);

		mlx5_cqwq_pop(&rq->cq.wq);

		wqe_counter_be = cqe->wqe_counter;
		wqe_counter = be16_to_cpu(wqe_counter_be);
		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		byte_cnt = be32_to_cpu(cqe->byte_cnt);

		bus_dmamap_sync(rq->dma_tag,
		    rq->mbuf[wqe_counter].dma_map,
		    BUS_DMASYNC_POSTREAD);

		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
			rq->stats.wqe_err++;
			goto wq_ll_pop;
		}
		if (pfil != NULL && PFIL_HOOKED_IN(pfil)) {
			seglen = MIN(byte_cnt, MLX5E_MAX_RX_BYTES);
			rv = pfil_run_hooks(rq->channel->priv->pfil,
			    rq->mbuf[wqe_counter].data, rq->ifp,
			    seglen | PFIL_MEMPTR | PFIL_IN, NULL);

			switch (rv) {
			case PFIL_DROPPED:
			case PFIL_CONSUMED:
				/*
				 * The filter dropped or consumed the
				 * packet.  In either case we can just
				 * recycle the buffer; there is no more
				 * work to do.
				 */
				rq->stats.packets++;
				goto wq_ll_pop;
			case PFIL_REALLOCED:
				/*
				 * The filter copied the packet; recycle
				 * the buffer and receive the new mbuf
				 * allocated by the filter.
				 */
				mb = pfil_mem2mbuf(rq->mbuf[wqe_counter].data);
				goto rx_common;
			default:
				/*
				 * The filter said the packet was OK, so
				 * receive it like normal.
				 */
				KASSERT(rv == PFIL_PASS,
					("Filter returned %d!\n", rv));
			}
		}
		if ((MHLEN - MLX5E_NET_IP_ALIGN) >= byte_cnt &&
		    (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
			/* set maximum mbuf length */
			mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN;
			/* get IP header aligned */
			mb->m_data += MLX5E_NET_IP_ALIGN;

			bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t),
			    byte_cnt);
		} else {
			mb = rq->mbuf[wqe_counter].mbuf;
			rq->mbuf[wqe_counter].mbuf = NULL;	/* safety clear */

			bus_dmamap_unload(rq->dma_tag,
			    rq->mbuf[wqe_counter].dma_map);
		}
rx_common:
		mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt);
		rq->stats.bytes += byte_cnt;
		rq->stats.packets++;
#ifdef NUMA
		mb->m_pkthdr.numa_domain = rq->ifp->if_numa_domain;
#endif

#if !defined(HAVE_TCP_LRO_RX)
		tcp_lro_queue_mbuf(&rq->lro, mb);
#else
		if (mb->m_pkthdr.csum_flags == 0 ||
		    (rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
		    rq->lro.lro_cnt == 0 ||
		    tcp_lro_rx(&rq->lro, mb, 0) != 0) {
			rq->ifp->if_input(rq->ifp, mb);
		}
#endif
wq_ll_pop:
		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
		    &wqe->next.next_wqe_index);
	}
	CURVNET_RESTORE();

	mlx5_cqwq_update_db_record(&rq->cq.wq);

	/* ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();
	return (i);
}

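/*
 * Receive completion event handler.  Polls the CQ in batches of
 * MLX5E_RX_BUDGET_MAX, reposting receive WQEs between batches so the
 * RQ does not run dry, then updates dynamic interrupt moderation,
 * re-arms the CQ and flushes any pending LRO sessions.
 */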
void
mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
	int i = 0;

#ifdef HAVE_PER_CQ_EVENT_PACKET
#if (MHLEN < 15)
#error "MHLEN is too small"
#endif
	struct mbuf *mb = m_gethdr(M_NOWAIT, MT_DATA);

	if (mb != NULL) {
		/* this code is used for debugging purposes only */
		mb->m_pkthdr.len = mb->m_len = 15;
		memset(mb->m_data, 255, 14);
		mb->m_data[14] = rq->ix;
		mb->m_pkthdr.rcvif = rq->ifp;
		rq->ifp->if_input(rq->ifp, mb);
	}
#endif

	mtx_lock(&rq->mtx);

	/*
	 * Polling the entire CQ without posting new WQEs can starve
	 * the RQ of receive WQEs under heavy traffic, so interleave
	 * reposting with the polling below.
	 */
	while (1) {
		if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
		    MLX5E_RX_BUDGET_MAX)
			break;
		i += MLX5E_RX_BUDGET_MAX;
		if (i >= MLX5E_BUDGET_MAX)
			break;
		mlx5e_post_rx_wqes(rq);
	}
	mlx5e_post_rx_wqes(rq);
	/* check for dynamic interrupt moderation callback */
	if (rq->dim.mode != NET_DIM_CQ_PERIOD_MODE_DISABLED)
		net_dim(&rq->dim, rq->stats.packets, rq->stats.bytes);
	mlx5e_cq_arm(&rq->cq, MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
	tcp_lro_flush_all(&rq->lro);
	mtx_unlock(&rq->mtx);
}