/*-
 * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>
#include <machine/in_cksum.h>

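/*
 * mlx5e_alloc_rx_wqe() fills one receive WQE slot: it allocates an
 * mbuf chain large enough to hold rq->wqe_sz bytes, shifts the first
 * mbuf by MLX5E_NET_IP_ALIGN so that the IP header lands on a 32-bit
 * boundary, DMA-maps the chain and writes the resulting scatter list
 * into the WQE. Unused scatter entries are zeroed. Returns 0 on
 * success, or -ENOMEM when an mbuf or a DMA mapping cannot be
 * obtained.
 */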
static inline int
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
    struct mlx5e_rx_wqe *wqe, u16 ix)
{
	bus_dma_segment_t segs[MLX5E_MAX_BUSDMA_RX_SEGS];
	struct mbuf *mb;
	int nsegs;
	int err;
	struct mbuf *mb_head;
	int i;

	if (rq->mbuf[ix].mbuf != NULL)
		return (0);

	mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    MLX5E_MAX_RX_BYTES);
	if (unlikely(mb == NULL))
		return (-ENOMEM);

	mb->m_len = MLX5E_MAX_RX_BYTES;
	mb->m_pkthdr.len = MLX5E_MAX_RX_BYTES;

	for (i = 1; i < rq->nsegs; i++) {
		if (mb_head->m_pkthdr.len >= rq->wqe_sz)
			break;
		mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0,
		    MLX5E_MAX_RX_BYTES);
		if (unlikely(mb == NULL)) {
			m_freem(mb_head);
			return (-ENOMEM);
		}
		mb->m_len = MLX5E_MAX_RX_BYTES;
		mb_head->m_pkthdr.len += MLX5E_MAX_RX_BYTES;
	}
	/* rewind to first mbuf in chain */
	mb = mb_head;

	/* get IP header aligned */
	m_adj(mb, MLX5E_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0)
		goto err_free_mbuf;
	if (unlikely(nsegs == 0)) {
		bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
		err = -ENOMEM;
		goto err_free_mbuf;
	}
	wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
	wqe->data[0].byte_count = cpu_to_be32(segs[0].ds_len |
	    MLX5_HW_START_PADDING);
	for (i = 1; i != nsegs; i++) {
		wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
		wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
	}
	for (; i < rq->nsegs; i++) {
		wqe->data[i].addr = 0;
		wqe->data[i].byte_count = 0;
	}

	rq->mbuf[ix].mbuf = mb;
	rq->mbuf[ix].data = mb->m_data;

	bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map,
	    BUS_DMASYNC_PREREAD);
	return (0);

err_free_mbuf:
	m_freem(mb);
	return (err);
}

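/*
 * mlx5e_post_rx_wqes() refills the receive work queue until it is
 * full. If an mbuf allocation fails, the refill is retried from a
 * watchdog callout one tick later. The release fence makes the new
 * WQEs visible to the device before the doorbell record is updated.
 */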
static void
mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	if (unlikely(rq->enabled == 0))
		return;

	while (!mlx5_wq_ll_is_full(&rq->wq)) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);

		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
			callout_reset_curcpu(&rq->watchdog, 1,
			    (void *)&mlx5e_post_rx_wqes, rq);
			break;
		}
		mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
	}

	/* ensure wqes are visible to device before updating doorbell record */
	atomic_thread_fence_rel();

	mlx5_wq_ll_update_db_record(&rq->wq);
}

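/*
 * mlx5e_lro_update_hdr() rewrites the packet headers of a hardware
 * LRO aggregate so that the single merged frame handed to the stack
 * is self-consistent: it patches the TCP flags, ACK number, window
 * and (when present) the timestamp option from the CQE, then fixes
 * up the IPv4 total length and header checksum, or the IPv6 payload
 * length.
 */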
static void
mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
	/* TODO: consider vlans, ip options, ... */
	struct ether_header *eh;
	uint16_t eh_type;
	uint16_t tot_len;
	struct ip6_hdr *ip6 = NULL;
	struct ip *ip4 = NULL;
	struct tcphdr *th;
	uint32_t *ts_ptr;
	uint8_t l4_hdr_type;
	int tcp_ack;

	eh = mtod(mb, struct ether_header *);
	eh_type = ntohs(eh->ether_type);

	l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
	    (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

	/* TODO: consider vlan */
	tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;

	switch (eh_type) {
	case ETHERTYPE_IP:
		ip4 = (struct ip *)(eh + 1);
		th = (struct tcphdr *)(ip4 + 1);
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(eh + 1);
		th = (struct tcphdr *)(ip6 + 1);
		break;
	default:
		return;
	}

	ts_ptr = (uint32_t *)(th + 1);

	if (get_cqe_lro_tcppsh(cqe))
		th->th_flags |= TH_PUSH;

	if (tcp_ack) {
		th->th_flags |= TH_ACK;
		th->th_ack = cqe->lro_ack_seq_num;
		th->th_win = cqe->lro_tcp_win;

		/*
		 * FreeBSD only handles a 32-bit aligned timestamp
		 * option placed right after the TCP header:
		 * +--------+--------+--------+--------+
		 * |  NOP   |  NOP   | TSopt  |   10   |
		 * +--------+--------+--------+--------+
		 * |          TSval timestamp          |
		 * +--------+--------+--------+--------+
		 * |          TSecr timestamp          |
		 * +--------+--------+--------+--------+
		 */
		if (get_cqe_lro_timestamp_valid(cqe) &&
		    __predict_true(*ts_ptr == ntohl(TCPOPT_NOP << 24 |
		    TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
		    TCPOLEN_TIMESTAMP))) {
			/*
			 * cqe->timestamp is 64 bits wide:
			 * bits [0-31] - timestamp.
			 * bits [32-63] - timestamp echo reply.
			 */
			ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
			ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
		}
	}
	if (ip4) {
		ip4->ip_ttl = cqe->lro_min_ttl;
		ip4->ip_len = cpu_to_be16(tot_len);
		ip4->ip_sum = 0;
		ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
	} else {
		ip6->ip6_hlim = cqe->lro_min_ttl;
		ip6->ip6_plen = cpu_to_be16(tot_len -
		    sizeof(struct ip6_hdr));
	}
	/* TODO: handle tcp checksum */
}

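/*
 * mlx5e_mbuf_tstmp() converts a raw hardware timestamp into
 * nanoseconds by linear interpolation between the two most recent
 * calibration points. The generation counter and acquire fences
 * guard against reading a calibration point while it is being
 * updated; a zero generation means no calibration is available yet
 * and zero is returned.
 */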
static uint64_t
mlx5e_mbuf_tstmp(struct mlx5e_priv *priv, uint64_t hw_tstmp)
{
	struct mlx5e_clbr_point *cp, dcp;
	uint64_t tstmp_sec, tstmp_nsec;
	uint64_t hw_clocks;
	uint64_t rt_cur_to_prev, res_s, res_n, res_s_modulo, res;
	uint64_t hw_clk_div;
	u_int gen;

	do {
		cp = &priv->clbr_points[priv->clbr_curr];
		gen = atomic_load_acq_int(&cp->clbr_gen);
		if (gen == 0)
			return (0);
		dcp = *cp;
		atomic_thread_fence_acq();
	} while (gen != dcp.clbr_gen);
	/*
	 * Our goal here is to have a result that is:
	 *
	 * (                        (cur_time - prev_time)   )
	 * ((hw_tstmp - hw_prev) *  ----------------------   ) + prev_time
	 * (                        (hw_cur - hw_prev)       )
	 *
	 * with the constraints that we cannot use floating point and
	 * we don't want to overflow the uint64_t numbers we are using.
	 *
	 * The plan is to take the clocking value of the hw timestamps
	 * and split them into seconds and nanosecond equivalent portions.
	 * Then we operate on the two portions separately, making sure to
	 * bring back the carry over from the seconds when we divide.
	 *
	 * First, let's split the two into separate entities, i.e. the
	 * seconds. We use the clock frequency for this. Note that
	 * priv->cclk was set up with the clock frequency in Hz, so we
	 * are all set to go.
	 */
	hw_clocks = hw_tstmp - dcp.clbr_hw_prev;
	tstmp_sec = hw_clocks / priv->cclk;
	tstmp_nsec = hw_clocks % priv->cclk;
	/* Now work with them separately */
	rt_cur_to_prev = (dcp.base_curr - dcp.base_prev);
	res_s = tstmp_sec * rt_cur_to_prev;
	res_n = tstmp_nsec * rt_cur_to_prev;
	/* Now let's get our divider */
	hw_clk_div = dcp.clbr_hw_curr - dcp.clbr_hw_prev;
	/* Make sure to save the remainder from the seconds divide */
	res_s_modulo = res_s % hw_clk_div;
	res_s /= hw_clk_div;
	/* scale the remainder to where it should be */
	res_s_modulo *= priv->cclk;
	/* Now add in the remainder */
	res_n += res_s_modulo;
	/* Now do the divide */
	res_n /= hw_clk_div;
	res_s *= priv->cclk;
	/* Recombine the two */
	res = res_s + res_n;
	/* And now add in the base time to get to the real timestamp */
	res += dcp.base_prev;
	return (res);
}

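/*
 * mlx5e_build_rx_mbuf() turns a completed receive WQE into a packet
 * the network stack can consume: it fixes up LRO headers for
 * multi-segment aggregates, trims the mbuf chain to the byte count
 * reported by the CQE, propagates the RSS hash and hash type, sets
 * the checksum-offload flags (including the inner headers of
 * tunneled packets), recovers the VLAN tag, attaches a converted
 * hardware timestamp once calibration is complete, and marks packets
 * decrypted by TLS offload.
 */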
static inline void
mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
    struct mlx5e_rq *rq, struct mbuf *mb,
    u32 cqe_bcnt)
{
	if_t ifp = rq->ifp;
	struct mlx5e_channel *c;
	struct mbuf *mb_head;
	int lro_num_seg;	/* HW LRO session aggregated packets counter */
	uint64_t tstmp;

	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(mb, cqe);
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

	mb->m_pkthdr.len = cqe_bcnt;
	for (mb_head = mb; mb != NULL; mb = mb->m_next) {
		if (mb->m_len > cqe_bcnt)
			mb->m_len = cqe_bcnt;
		cqe_bcnt -= mb->m_len;
		if (likely(cqe_bcnt == 0)) {
			if (likely(mb->m_next != NULL)) {
				/* trim off empty mbufs */
				m_freem(mb->m_next);
				mb->m_next = NULL;
			}
			break;
		}
	}
	/* rewind to first mbuf in chain */
	mb = mb_head;

	/* check if a Toeplitz hash was computed */
	if (cqe->rss_hash_type != 0) {
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
#ifdef RSS
		/* decode the RSS hash type */
		switch (cqe->rss_hash_type &
		    (CQE_RSS_DST_HTYPE_L4 | CQE_RSS_DST_HTYPE_IP)) {
		/* IPv4 */
		case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV4):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4);
			break;
		case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV4):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4);
			break;
		case CQE_RSS_DST_HTYPE_IPV4:
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4);
			break;
		/* IPv6 */
		case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV6):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6);
			break;
		case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV6):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6);
			break;
		case CQE_RSS_DST_HTYPE_IPV6:
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6);
			break;
		default:	/* Other */
			M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
			break;
		}
#else
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
#endif
#ifdef M_HASHTYPE_SETINNER
		if (cqe_is_tunneled(cqe))
			M_HASHTYPE_SETINNER(mb);
#endif
	} else {
		mb->m_pkthdr.flowid = rq->ix;
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
	}
	mb->m_pkthdr.rcvif = ifp;
	mb->m_pkthdr.leaf_rcvif = ifp;

	if (cqe_is_tunneled(cqe)) {
		/*
		 * CQE can be tunneled only if TIR is configured to
		 * enable parsing of tunneled payload, so no need to
		 * check for capabilities.
		 */
		if (((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK)) ==
		    (CQE_L2_OK | CQE_L3_OK))) {
			mb->m_pkthdr.csum_flags |=
			    CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);

			if (likely((cqe->hds_ip_ext & CQE_L4_OK) == CQE_L4_OK)) {
				mb->m_pkthdr.csum_flags |=
				    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;
			}
		} else {
			rq->stats.csum_none++;
		}
	} else if (likely((if_getcapenable(ifp) & (IFCAP_RXCSUM |
	    IFCAP_RXCSUM_IPV6)) != 0) &&
	    ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
	    (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK))) {
		mb->m_pkthdr.csum_flags =
		    CSUM_IP_CHECKED | CSUM_IP_VALID |
		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mb->m_pkthdr.csum_data = htons(0xffff);
	} else {
		rq->stats.csum_none++;
	}

	if (cqe_has_vlan(cqe)) {
		mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
		mb->m_flags |= M_VLANTAG;
	}

	c = container_of(rq, struct mlx5e_channel, rq);
	if (c->priv->clbr_done >= 2) {
		tstmp = mlx5e_mbuf_tstmp(c->priv, be64_to_cpu(cqe->timestamp));
		if ((tstmp & MLX5_CQE_TSTMP_PTP) != 0) {
			/*
			 * Timestamp was taken at packet entrance,
			 * instead of at CQE generation.
			 */
			tstmp &= ~MLX5_CQE_TSTMP_PTP;
			mb->m_flags |= M_TSTMP_HPREC;
		}
		if (tstmp != 0) {
			mb->m_pkthdr.rcv_tstmp = tstmp;
			mb->m_flags |= M_TSTMP;
		}
	}
	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		/* set proper checksum flag for decrypted packets */
		mb->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
		rq->stats.decrypted_ok_packets++;
		break;
	case CQE_TLS_OFFLOAD_ERROR:
		rq->stats.decrypted_error_packets++;
		break;
	default:
		break;
	}
}

static inline void
mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, (cc & cq->wq.sz_m1)),
	    sizeof(struct mlx5_cqe64));
}

static inline void
mlx5e_write_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	memcpy(mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq.sz_m1),
	    data, sizeof(struct mlx5_cqe64));
}

static inline void
mlx5e_decompress_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *title,
    struct mlx5_mini_cqe8 *mini,
    u16 wqe_counter, int i)
{
	/*
	 * NOTE: The fields which are not set here are copied from the
	 * initial and common title. See memcpy() in
	 * mlx5e_write_cqe_slot().
	 */
	title->byte_cnt = mini->byte_cnt;
	title->wqe_counter = cpu_to_be16((wqe_counter + i) & cq->wq.sz_m1);
	title->rss_hash_result = mini->rx_hash_result;
	/*
	 * Since we use MLX5_CQE_FORMAT_HASH when creating the RX CQ,
	 * the value of the checksum should be ignored.
	 */
	title->check_sum = 0;
	title->op_own = (title->op_own & 0xf0) |
	    (((cq->wq.cc + i) >> cq->wq.log_sz) & 1);
}

#define	MLX5E_MINI_ARRAY_SZ 8
/* Make sure structs are not packed differently */
CTASSERT(sizeof(struct mlx5_cqe64) ==
    sizeof(struct mlx5_mini_cqe8) * MLX5E_MINI_ARRAY_SZ);
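/*
 * mlx5e_decompress_cqes() expands a compressed completion event in
 * place. The first CQE slot holds the title CQE, whose byte count
 * carries the number of compressed completions; the following slots
 * hold arrays of eight mini CQEs. Each mini CQE is merged with the
 * title fields and written back so that the regular polling loop can
 * consume the expanded CQEs unchanged.
 */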
static void
mlx5e_decompress_cqes(struct mlx5e_cq *cq)
{
	struct mlx5_mini_cqe8 mini_array[MLX5E_MINI_ARRAY_SZ];
	struct mlx5_cqe64 title;
	u32 cqe_count;
	u32 i = 0;
	u16 title_wqe_counter;

	mlx5e_read_cqe_slot(cq, cq->wq.cc, &title);
	title_wqe_counter = be16_to_cpu(title.wqe_counter);
	cqe_count = be32_to_cpu(title.byte_cnt);

	/* Make sure we won't overflow */
	KASSERT(cqe_count <= cq->wq.sz_m1,
	    ("%s: cqe_count %u > cq->wq.sz_m1 %u", __func__,
	    cqe_count, cq->wq.sz_m1));

	mlx5e_read_cqe_slot(cq, cq->wq.cc + 1, mini_array);
	while (true) {
		mlx5e_decompress_cqe(cq, &title,
		    &mini_array[i % MLX5E_MINI_ARRAY_SZ],
		    title_wqe_counter, i);
		mlx5e_write_cqe_slot(cq, cq->wq.cc + i, &title);
		i++;

		if (i == cqe_count)
			break;
		if (i % MLX5E_MINI_ARRAY_SZ == 0)
			mlx5e_read_cqe_slot(cq, cq->wq.cc + i, mini_array);
	}
}

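/*
 * mlx5e_poll_rx_cq() processes up to "budget" completions. For each
 * CQE it locates the matching receive WQE, lets any pfil(9) hooks
 * inspect the buffer before an mbuf is committed, copies small
 * frames into a fresh header mbuf so the large receive buffer can
 * be recycled, and hands the result to LRO or directly to the
 * interface input routine. Returns the number of CQEs consumed.
 */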
static int
mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
{
	struct pfil_head *pfil;
	int i, rv;

	CURVNET_SET_QUIET(if_getvnet(rq->ifp));
	pfil = rq->channel->priv->pfil;
	for (i = 0; i < budget; i++) {
		struct mlx5e_rx_wqe *wqe;
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		__be16 wqe_counter_be;
		u16 wqe_counter;
		u32 byte_cnt, seglen;

		cqe = mlx5e_get_cqe(&rq->cq);
		if (!cqe)
			break;

		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
			mlx5e_decompress_cqes(&rq->cq);

		mlx5_cqwq_pop(&rq->cq.wq);

		wqe_counter_be = cqe->wqe_counter;
		wqe_counter = be16_to_cpu(wqe_counter_be);
		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		byte_cnt = be32_to_cpu(cqe->byte_cnt);

		bus_dmamap_sync(rq->dma_tag,
		    rq->mbuf[wqe_counter].dma_map,
		    BUS_DMASYNC_POSTREAD);

		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
			mlx5e_dump_err_cqe(&rq->cq, rq->rqn, (const void *)cqe);
			rq->stats.wqe_err++;
			goto wq_ll_pop;
		}
		if (pfil != NULL && PFIL_HOOKED_IN(pfil)) {
			seglen = MIN(byte_cnt, MLX5E_MAX_RX_BYTES);
			rv = pfil_mem_in(rq->channel->priv->pfil,
			    rq->mbuf[wqe_counter].data, seglen, rq->ifp, &mb);

			switch (rv) {
			case PFIL_DROPPED:
			case PFIL_CONSUMED:
				/*
				 * Filter dropped or consumed it. In
				 * either case, we can just recycle the
				 * buffer; there is no more work to do.
				 */
				rq->stats.packets++;
				goto wq_ll_pop;
			case PFIL_REALLOCED:
				/*
				 * Filter copied it; recycle the buffer
				 * and receive the new mbuf allocated
				 * by the filter.
				 */
				goto rx_common;
			default:
				/*
				 * The filter said it was OK, so
				 * receive like normal.
				 */
				KASSERT(rv == PFIL_PASS,
				    ("Filter returned %d!\n", rv));
			}
		}
		if ((MHLEN - MLX5E_NET_IP_ALIGN) >= byte_cnt &&
		    (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
			/* set maximum mbuf length */
			mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN;
			/* get IP header aligned */
			mb->m_data += MLX5E_NET_IP_ALIGN;

			bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t),
			    byte_cnt);
		} else {
			mb = rq->mbuf[wqe_counter].mbuf;
			rq->mbuf[wqe_counter].mbuf = NULL;	/* safety clear */

			bus_dmamap_unload(rq->dma_tag,
			    rq->mbuf[wqe_counter].dma_map);
		}
rx_common:
		mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt);
		rq->stats.bytes += byte_cnt;
		rq->stats.packets++;
#ifdef NUMA
		mb->m_pkthdr.numa_domain = if_getnumadomain(rq->ifp);
#endif

#if !defined(HAVE_TCP_LRO_RX)
		tcp_lro_queue_mbuf(&rq->lro, mb);
#else
		if (mb->m_pkthdr.csum_flags == 0 ||
		    (if_getcapenable(rq->ifp) & IFCAP_LRO) == 0 ||
		    rq->lro.lro_cnt == 0 ||
		    tcp_lro_rx(&rq->lro, mb, 0) != 0) {
			if_input(rq->ifp, mb);
		}
#endif
wq_ll_pop:
		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
		    &wqe->next.next_wqe_index);
	}
	CURVNET_RESTORE();

	mlx5_cqwq_update_db_record(&rq->cq.wq);

	/* ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();
	return (i);
}

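/*
 * mlx5e_rx_cq_comp() is the RX completion event handler. It inhibits
 * the transmit and internal queue doorbells while it runs so that
 * their doorbell writes are deferred until the end, then alternates
 * between polling the CQ and reposting receive WQEs (polling the
 * whole CQ in one go would starve the RQ of buffers under heavy
 * traffic), updates the dynamic interrupt moderation state, rearms
 * the CQ, and flushes any pending LRO aggregates.
 */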
void
mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)
{
	struct mlx5e_channel *c = container_of(mcq, struct mlx5e_channel, rq.cq.mcq);
	struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
	int i = 0;

#ifdef HAVE_PER_CQ_EVENT_PACKET
#if (MHLEN < 15)
#error "MHLEN is too small"
#endif
	struct mbuf *mb = m_gethdr(M_NOWAIT, MT_DATA);

	if (mb != NULL) {
		/* this code is used for debugging purposes only */
		mb->m_pkthdr.len = mb->m_len = 15;
		memset(mb->m_data, 255, 14);
		mb->m_data[14] = rq->ix;
		mb->m_pkthdr.rcvif = rq->ifp;
		mb->m_pkthdr.leaf_rcvif = rq->ifp;
		if_input(rq->ifp, mb);
	}
#endif
	for (int j = 0; j != MLX5E_MAX_TX_NUM_TC; j++) {
		mtx_lock(&c->sq[j].lock);
		c->sq[j].db_inhibit++;
		mtx_unlock(&c->sq[j].lock);
	}

	mtx_lock(&c->iq.lock);
	c->iq.db_inhibit++;
	mtx_unlock(&c->iq.lock);

	mtx_lock(&rq->mtx);

	/*
	 * Polling the entire CQ without posting new WQEs results in a
	 * lack of receive WQEs during heavy traffic scenarios.
	 */
	while (1) {
		if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
		    MLX5E_RX_BUDGET_MAX)
			break;
		i += MLX5E_RX_BUDGET_MAX;
		if (i >= MLX5E_BUDGET_MAX)
			break;
		mlx5e_post_rx_wqes(rq);
	}
	mlx5e_post_rx_wqes(rq);
	/* check for dynamic interrupt moderation callback */
	if (rq->dim.mode != NET_DIM_CQ_PERIOD_MODE_DISABLED)
		net_dim(&rq->dim, rq->stats.packets, rq->stats.bytes);
	mlx5e_cq_arm(&rq->cq, MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
	tcp_lro_flush_all(&rq->lro);
	mtx_unlock(&rq->mtx);

	for (int j = 0; j != MLX5E_MAX_TX_NUM_TC; j++) {
		mtx_lock(&c->sq[j].lock);
		c->sq[j].db_inhibit--;
		/* Update the doorbell record, if any. */
		mlx5e_tx_notify_hw(c->sq + j, true);
		mtx_unlock(&c->sq[j].lock);
	}

	mtx_lock(&c->iq.lock);
	c->iq.db_inhibit--;
	mlx5e_iq_notify_hw(&c->iq);
	mtx_unlock(&c->iq.lock);
}