1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/ip.h>
34 #include <linux/ipv6.h>
35 #include <linux/tcp.h>
36 #include <linux/bitmap.h>
37 #include <linux/filter.h>
38 #include <net/ip6_checksum.h>
39 #include <net/page_pool/helpers.h>
40 #include <net/inet_ecn.h>
41 #include <net/gro.h>
42 #include <net/udp.h>
43 #include <net/tcp.h>
44 #include <net/xdp_sock_drv.h>
45 #include "en.h"
46 #include "en/txrx.h"
47 #include "en_tc.h"
48 #include "eswitch.h"
49 #include "en_rep.h"
50 #include "en/rep/tc.h"
51 #include "ipoib/ipoib.h"
52 #include "en_accel/ipsec.h"
53 #include "en_accel/macsec.h"
54 #include "en_accel/ipsec_rxtx.h"
55 #include "en_accel/ktls_txrx.h"
56 #include "en/xdp.h"
57 #include "en/xsk/rx.h"
58 #include "en/health.h"
59 #include "en/params.h"
60 #include "devlink.h"
61 #include "en/devlink.h"
62 
63 static struct sk_buff *
64 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
65 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
66 				u32 page_idx);
67 static struct sk_buff *
68 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
69 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
70 				   u32 page_idx);
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
74 
75 const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
76 	.handle_rx_cqe       = mlx5e_handle_rx_cqe,
77 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
78 	.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
79 };
80 
mlx5e_read_cqe_slot(struct mlx5_cqwq * wq,u32 cqcc,void * data)81 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
82 				       u32 cqcc, void *data)
83 {
84 	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
85 
86 	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
87 }
88 
mlx5e_read_enhanced_title_slot(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe)89 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
90 					   struct mlx5_cqe64 *cqe)
91 {
92 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
93 	struct mlx5_cqe64 *title = &cqd->title;
94 
95 	memcpy(title, cqe, sizeof(struct mlx5_cqe64));
96 
97 	if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
98 		return;
99 
100 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
101 		cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
102 			mpwrq_get_cqe_consumed_strides(title);
103 	else
104 		cqd->wqe_counter =
105 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
106 }
107 
mlx5e_read_title_slot(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,u32 cqcc)108 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
109 					 struct mlx5_cqwq *wq,
110 					 u32 cqcc)
111 {
112 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
113 	struct mlx5_cqe64 *title = &cqd->title;
114 
115 	mlx5e_read_cqe_slot(wq, cqcc, title);
116 	cqd->left        = be32_to_cpu(title->byte_cnt);
117 	cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
118 	rq->stats->cqe_compress_blks++;
119 }
120 
mlx5e_read_mini_arr_slot(struct mlx5_cqwq * wq,struct mlx5e_cq_decomp * cqd,u32 cqcc)121 static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
122 					    struct mlx5e_cq_decomp *cqd,
123 					    u32 cqcc)
124 {
125 	mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
126 	cqd->mini_arr_idx = 0;
127 }
128 
mlx5e_cqes_update_owner(struct mlx5_cqwq * wq,int n)129 static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
130 {
131 	u32 cqcc   = wq->cc;
132 	u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
133 	u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
134 	u32 wq_sz  = mlx5_cqwq_get_size(wq);
135 	u32 ci_top = min_t(u32, wq_sz, ci + n);
136 
137 	for (; ci < ci_top; ci++, n--) {
138 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
139 
140 		cqe->op_own = op_own;
141 	}
142 
143 	if (unlikely(ci == wq_sz)) {
144 		op_own = !op_own;
145 		for (ci = 0; ci < n; ci++) {
146 			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
147 
148 			cqe->op_own = op_own;
149 		}
150 	}
151 }
152 
mlx5e_decompress_cqe(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,u32 cqcc)153 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
154 					struct mlx5_cqwq *wq,
155 					u32 cqcc)
156 {
157 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
158 	struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
159 	struct mlx5_cqe64 *title = &cqd->title;
160 
161 	title->byte_cnt     = mini_cqe->byte_cnt;
162 	title->check_sum    = mini_cqe->checksum;
163 	title->op_own      &= 0xf0;
164 	title->op_own      |= 0x01 & (cqcc >> wq->fbc.log_sz);
165 
166 	/* state bit set implies linked-list striding RQ wq type and
167 	 * HW stride index capability supported
168 	 */
169 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
170 		title->wqe_counter = mini_cqe->stridx;
171 		return;
172 	}
173 
174 	/* HW stride index capability not supported */
175 	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
176 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
177 		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
178 	else
179 		cqd->wqe_counter =
180 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
181 }
182 
mlx5e_decompress_cqe_no_hash(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,u32 cqcc)183 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
184 						struct mlx5_cqwq *wq,
185 						u32 cqcc)
186 {
187 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
188 
189 	mlx5e_decompress_cqe(rq, wq, cqcc);
190 	cqd->title.rss_hash_type   = 0;
191 	cqd->title.rss_hash_result = 0;
192 }
193 
mlx5e_decompress_enhanced_cqe(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,struct mlx5_cqe64 * cqe,int budget_rem)194 static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
195 					 struct mlx5_cqwq *wq,
196 					 struct mlx5_cqe64 *cqe,
197 					 int budget_rem)
198 {
199 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
200 	u32 cqcc, left;
201 	u32 i;
202 
203 	left = get_cqe_enhanced_num_mini_cqes(cqe);
204 	/* Here we avoid breaking the cqe compression session in the middle
205 	 * in case budget is not sufficient to handle all of it. In this case
206 	 * we return work_done == budget_rem to give 'busy' napi indication.
207 	 */
208 	if (unlikely(left > budget_rem))
209 		return budget_rem;
210 
211 	cqcc = wq->cc;
212 	cqd->mini_arr_idx = 0;
213 	memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
214 	for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
215 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
216 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
217 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
218 				rq, &cqd->title);
219 	}
220 	wq->cc = cqcc;
221 	rq->stats->cqe_compress_pkts += left;
222 
223 	return left;
224 }
225 
mlx5e_decompress_cqes_cont(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,int update_owner_only,int budget_rem)226 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
227 					     struct mlx5_cqwq *wq,
228 					     int update_owner_only,
229 					     int budget_rem)
230 {
231 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
232 	u32 cqcc = wq->cc + update_owner_only;
233 	u32 cqe_count;
234 	u32 i;
235 
236 	cqe_count = min_t(u32, cqd->left, budget_rem);
237 
238 	for (i = update_owner_only; i < cqe_count;
239 	     i++, cqd->mini_arr_idx++, cqcc++) {
240 		if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
241 			mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
242 
243 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
244 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
245 				mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
246 				rq, &cqd->title);
247 	}
248 	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
249 	wq->cc = cqcc;
250 	cqd->left -= cqe_count;
251 	rq->stats->cqe_compress_pkts += cqe_count;
252 
253 	return cqe_count;
254 }
255 
mlx5e_decompress_cqes_start(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,int budget_rem)256 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
257 					      struct mlx5_cqwq *wq,
258 					      int budget_rem)
259 {
260 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
261 	u32 cc = wq->cc;
262 
263 	mlx5e_read_title_slot(rq, wq, cc);
264 	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
265 	mlx5e_decompress_cqe(rq, wq, cc);
266 	INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
267 			mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
268 			rq, &cqd->title);
269 	cqd->mini_arr_idx++;
270 
271 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
272 }
273 
274 #define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
275 
mlx5e_page_alloc_fragmented(struct mlx5e_rq * rq,struct mlx5e_frag_page * frag_page)276 static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
277 				       struct mlx5e_frag_page *frag_page)
278 {
279 	struct page *page;
280 
281 	page = page_pool_dev_alloc_pages(rq->page_pool);
282 	if (unlikely(!page))
283 		return -ENOMEM;
284 
285 	page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX);
286 
287 	*frag_page = (struct mlx5e_frag_page) {
288 		.page	= page,
289 		.frags	= 0,
290 	};
291 
292 	return 0;
293 }
294 
mlx5e_page_release_fragmented(struct mlx5e_rq * rq,struct mlx5e_frag_page * frag_page)295 static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
296 					  struct mlx5e_frag_page *frag_page)
297 {
298 	u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
299 	struct page *page = frag_page->page;
300 
301 	if (page_pool_unref_page(page, drain_count) == 0)
302 		page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
303 }
304 
mlx5e_get_rx_frag(struct mlx5e_rq * rq,struct mlx5e_wqe_frag_info * frag)305 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
306 				    struct mlx5e_wqe_frag_info *frag)
307 {
308 	int err = 0;
309 
310 	if (!frag->offset)
311 		/* On first frag (offset == 0), replenish page.
312 		 * Other frags that point to the same page (with a different
313 		 * offset) should just use the new one without replenishing again
314 		 * by themselves.
315 		 */
316 		err = mlx5e_page_alloc_fragmented(rq, frag->frag_page);
317 
318 	return err;
319 }
320 
mlx5e_frag_can_release(struct mlx5e_wqe_frag_info * frag)321 static bool mlx5e_frag_can_release(struct mlx5e_wqe_frag_info *frag)
322 {
323 #define CAN_RELEASE_MASK \
324 	(BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) | BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))
325 
326 #define CAN_RELEASE_VALUE BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE)
327 
328 	return (frag->flags & CAN_RELEASE_MASK) == CAN_RELEASE_VALUE;
329 }
330 
mlx5e_put_rx_frag(struct mlx5e_rq * rq,struct mlx5e_wqe_frag_info * frag)331 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
332 				     struct mlx5e_wqe_frag_info *frag)
333 {
334 	if (mlx5e_frag_can_release(frag))
335 		mlx5e_page_release_fragmented(rq, frag->frag_page);
336 }
337 
get_frag(struct mlx5e_rq * rq,u16 ix)338 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
339 {
340 	return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
341 }
342 
mlx5e_alloc_rx_wqe(struct mlx5e_rq * rq,struct mlx5e_rx_wqe_cyc * wqe,u16 ix)343 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
344 			      u16 ix)
345 {
346 	struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
347 	int err;
348 	int i;
349 
350 	for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
351 		dma_addr_t addr;
352 		u16 headroom;
353 
354 		err = mlx5e_get_rx_frag(rq, frag);
355 		if (unlikely(err))
356 			goto free_frags;
357 
358 		frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
359 
360 		headroom = i == 0 ? rq->buff.headroom : 0;
361 		addr = page_pool_get_dma_addr(frag->frag_page->page);
362 		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
363 	}
364 
365 	return 0;
366 
367 free_frags:
368 	while (--i >= 0)
369 		mlx5e_put_rx_frag(rq, --frag);
370 
371 	return err;
372 }
373 
mlx5e_free_rx_wqe(struct mlx5e_rq * rq,struct mlx5e_wqe_frag_info * wi)374 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
375 				     struct mlx5e_wqe_frag_info *wi)
376 {
377 	int i;
378 
379 	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
380 		mlx5e_put_rx_frag(rq, wi);
381 }
382 
mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info * wi)383 static void mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info *wi)
384 {
385 	if (!(wi->flags & BIT(MLX5E_WQE_FRAG_SKIP_RELEASE)))
386 		xsk_buff_free(*wi->xskp);
387 }
388 
mlx5e_dealloc_rx_wqe(struct mlx5e_rq * rq,u16 ix)389 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
390 {
391 	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
392 
393 	if (rq->xsk_pool) {
394 		mlx5e_xsk_free_rx_wqe(wi);
395 	} else {
396 		mlx5e_free_rx_wqe(rq, wi);
397 
398 		/* Avoid a second release of the wqe pages: dealloc is called
399 		 * for the same missing wqes on regular RQ flush and on regular
400 		 * RQ close. This happens when XSK RQs come into play.
401 		 */
402 		for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
403 			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
404 	}
405 }
406 
mlx5e_xsk_free_rx_wqes(struct mlx5e_rq * rq,u16 ix,int wqe_bulk)407 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
408 {
409 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
410 	int i;
411 
412 	for (i = 0; i < wqe_bulk; i++) {
413 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
414 		struct mlx5e_wqe_frag_info *wi;
415 
416 		wi = get_frag(rq, j);
417 		/* The page is always put into the Reuse Ring, because there
418 		 * is no way to return the page to the userspace when the
419 		 * interface goes down.
420 		 */
421 		mlx5e_xsk_free_rx_wqe(wi);
422 	}
423 }
424 
mlx5e_free_rx_wqes(struct mlx5e_rq * rq,u16 ix,int wqe_bulk)425 static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
426 {
427 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
428 	int i;
429 
430 	for (i = 0; i < wqe_bulk; i++) {
431 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
432 		struct mlx5e_wqe_frag_info *wi;
433 
434 		wi = get_frag(rq, j);
435 		mlx5e_free_rx_wqe(rq, wi);
436 	}
437 }
438 
mlx5e_alloc_rx_wqes(struct mlx5e_rq * rq,u16 ix,int wqe_bulk)439 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
440 {
441 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
442 	int i;
443 
444 	for (i = 0; i < wqe_bulk; i++) {
445 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
446 		struct mlx5e_rx_wqe_cyc *wqe;
447 
448 		wqe = mlx5_wq_cyc_get_wqe(wq, j);
449 
450 		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
451 			break;
452 	}
453 
454 	return i;
455 }
456 
mlx5e_refill_rx_wqes(struct mlx5e_rq * rq,u16 ix,int wqe_bulk)457 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
458 {
459 	int remaining = wqe_bulk;
460 	int total_alloc = 0;
461 	int refill_alloc;
462 	int refill;
463 
464 	/* The WQE bulk is split into smaller bulks that are sized
465 	 * according to the page pool cache refill size to avoid overflowing
466 	 * the page pool cache due to too many page releases at once.
467 	 */
468 	do {
469 		refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
470 
471 		mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
472 		refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
473 		if (unlikely(refill_alloc != refill))
474 			goto err_free;
475 
476 		total_alloc += refill_alloc;
477 		remaining -= refill;
478 	} while (remaining);
479 
480 	return total_alloc;
481 
482 err_free:
483 	mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
484 
485 	for (int i = 0; i < total_alloc + refill; i++) {
486 		int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
487 		struct mlx5e_wqe_frag_info *frag;
488 
489 		frag = get_frag(rq, j);
490 		for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
491 			frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
492 	}
493 
494 	return 0;
495 }
496 
497 static void
mlx5e_add_skb_shared_info_frag(struct mlx5e_rq * rq,struct skb_shared_info * sinfo,struct xdp_buff * xdp,struct mlx5e_frag_page * frag_page,u32 frag_offset,u32 len)498 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
499 			       struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
500 			       u32 frag_offset, u32 len)
501 {
502 	skb_frag_t *frag;
503 
504 	dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
505 
506 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
507 	if (!xdp_buff_has_frags(xdp)) {
508 		/* Init on the first fragment to avoid cold cache access
509 		 * when possible.
510 		 */
511 		sinfo->nr_frags = 0;
512 		sinfo->xdp_frags_size = 0;
513 		xdp_buff_set_frags_flag(xdp);
514 	}
515 
516 	frag = &sinfo->frags[sinfo->nr_frags++];
517 	skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len);
518 
519 	if (page_is_pfmemalloc(frag_page->page))
520 		xdp_buff_set_frag_pfmemalloc(xdp);
521 	sinfo->xdp_frags_size += len;
522 }
523 
524 static inline void
mlx5e_add_skb_frag(struct mlx5e_rq * rq,struct sk_buff * skb,struct mlx5e_frag_page * frag_page,u32 frag_offset,u32 len,unsigned int truesize)525 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
526 		   struct mlx5e_frag_page *frag_page,
527 		   u32 frag_offset, u32 len,
528 		   unsigned int truesize)
529 {
530 	dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
531 	u8 next_frag = skb_shinfo(skb)->nr_frags;
532 
533 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
534 				rq->buff.map_dir);
535 
536 	if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) {
537 		skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
538 	} else {
539 		frag_page->frags++;
540 		skb_add_rx_frag(skb, next_frag, frag_page->page,
541 				frag_offset, len, truesize);
542 	}
543 }
544 
545 static inline void
mlx5e_copy_skb_header(struct mlx5e_rq * rq,struct sk_buff * skb,struct page * page,dma_addr_t addr,int offset_from,int dma_offset,u32 headlen)546 mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
547 		      struct page *page, dma_addr_t addr,
548 		      int offset_from, int dma_offset, u32 headlen)
549 {
550 	const void *from = page_address(page) + offset_from;
551 	/* Aligning len to sizeof(long) optimizes memcpy performance */
552 	unsigned int len = ALIGN(headlen, sizeof(long));
553 
554 	dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
555 				rq->buff.map_dir);
556 	skb_copy_to_linear_data(skb, from, len);
557 }
558 
559 static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq * rq,struct mlx5e_mpw_info * wi)560 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
561 {
562 	bool no_xdp_xmit;
563 	int i;
564 
565 	/* A common case for AF_XDP. */
566 	if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))
567 		return;
568 
569 	no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
570 
571 	if (rq->xsk_pool) {
572 		struct xdp_buff **xsk_buffs = wi->alloc_units.xsk_buffs;
573 
574 		/* The page is always put into the Reuse Ring, because there
575 		 * is no way to return the page to userspace when the interface
576 		 * goes down.
577 		 */
578 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
579 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap))
580 				xsk_buff_free(xsk_buffs[i]);
581 	} else {
582 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {
583 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap)) {
584 				struct mlx5e_frag_page *frag_page;
585 
586 				frag_page = &wi->alloc_units.frag_pages[i];
587 				mlx5e_page_release_fragmented(rq, frag_page);
588 			}
589 		}
590 	}
591 }
592 
mlx5e_post_rx_mpwqe(struct mlx5e_rq * rq,u8 n)593 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
594 {
595 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
596 
597 	do {
598 		u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
599 
600 		mlx5_wq_ll_push(wq, next_wqe_index);
601 	} while (--n);
602 
603 	/* ensure wqes are visible to device before updating doorbell record */
604 	dma_wmb();
605 
606 	mlx5_wq_ll_update_db_record(wq);
607 }
608 
609 /* This function returns the size of the continuous free space inside a bitmap
610  * that starts from first and no longer than len including circular ones.
611  */
bitmap_find_window(unsigned long * bitmap,int len,int bitmap_size,int first)612 static int bitmap_find_window(unsigned long *bitmap, int len,
613 			      int bitmap_size, int first)
614 {
615 	int next_one, count;
616 
617 	next_one = find_next_bit(bitmap, bitmap_size, first);
618 	if (next_one == bitmap_size) {
619 		if (bitmap_size - first >= len)
620 			return len;
621 		next_one = find_next_bit(bitmap, bitmap_size, 0);
622 		count = next_one + bitmap_size - first;
623 	} else {
624 		count = next_one - first;
625 	}
626 
627 	return min(len, count);
628 }
629 
build_ksm_umr(struct mlx5e_icosq * sq,struct mlx5e_umr_wqe * umr_wqe,__be32 key,u16 offset,u16 ksm_len)630 static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
631 			  __be32 key, u16 offset, u16 ksm_len)
632 {
633 	memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
634 	umr_wqe->ctrl.opmod_idx_opcode =
635 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
636 			     MLX5_OPCODE_UMR);
637 	umr_wqe->ctrl.umr_mkey = key;
638 	umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
639 					    | MLX5E_KSM_UMR_DS_CNT(ksm_len));
640 	umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
641 	umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
642 	umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len);
643 	umr_wqe->uctrl.mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
644 }
645 
mlx5e_build_shampo_hd_umr(struct mlx5e_rq * rq,struct mlx5e_icosq * sq,u16 ksm_entries,u16 index)646 static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
647 				     struct mlx5e_icosq *sq,
648 				     u16 ksm_entries, u16 index)
649 {
650 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
651 	u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
652 	u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
653 	u16 page_index = shampo->curr_page_index;
654 	struct mlx5e_frag_page *frag_page;
655 	u64 addr = shampo->last_addr;
656 	struct mlx5e_dma_info *dma_info;
657 	struct mlx5e_umr_wqe *umr_wqe;
658 	int headroom, i;
659 
660 	headroom = rq->buff.headroom;
661 	new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
662 	entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
663 	wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries);
664 	pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
665 	umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
666 	build_ksm_umr(sq, umr_wqe, shampo->key, index, entries);
667 
668 	frag_page = &shampo->pages[page_index];
669 
670 	for (i = 0; i < entries; i++, index++) {
671 		dma_info = &shampo->info[index];
672 		if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index <
673 					 MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT))
674 			goto update_ksm;
675 		header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
676 			MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
677 		if (!(header_offset & (PAGE_SIZE - 1))) {
678 			page_index = (page_index + 1) & (shampo->hd_per_wq - 1);
679 			frag_page = &shampo->pages[page_index];
680 
681 			err = mlx5e_page_alloc_fragmented(rq, frag_page);
682 			if (unlikely(err))
683 				goto err_unmap;
684 
685 			addr = page_pool_get_dma_addr(frag_page->page);
686 
687 			dma_info->addr = addr;
688 			dma_info->frag_page = frag_page;
689 		} else {
690 			dma_info->addr = addr + header_offset;
691 			dma_info->frag_page = frag_page;
692 		}
693 
694 update_ksm:
695 		umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
696 			.key = cpu_to_be32(lkey),
697 			.va  = cpu_to_be64(dma_info->addr + headroom),
698 		};
699 	}
700 
701 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
702 		.wqe_type	= MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
703 		.num_wqebbs	= wqe_bbs,
704 		.shampo.len	= new_entries,
705 	};
706 
707 	shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
708 	shampo->curr_page_index = page_index;
709 	shampo->last_addr = addr;
710 	sq->pc += wqe_bbs;
711 	sq->doorbell_cseg = &umr_wqe->ctrl;
712 
713 	return 0;
714 
715 err_unmap:
716 	while (--i >= 0) {
717 		dma_info = &shampo->info[--index];
718 		if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
719 			dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
720 			mlx5e_page_release_fragmented(rq, dma_info->frag_page);
721 		}
722 	}
723 	rq->stats->buff_alloc_err++;
724 	return err;
725 }
726 
mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq * rq)727 static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
728 {
729 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
730 	u16 ksm_entries, num_wqe, index, entries_before;
731 	struct mlx5e_icosq *sq = rq->icosq;
732 	int i, err, max_ksm_entries, len;
733 
734 	max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
735 	ksm_entries = bitmap_find_window(shampo->bitmap,
736 					 shampo->hd_per_wqe,
737 					 shampo->hd_per_wq, shampo->pi);
738 	ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
739 	if (!ksm_entries)
740 		return 0;
741 
742 	ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
743 	index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
744 	entries_before = shampo->hd_per_wq - index;
745 
746 	if (unlikely(entries_before < ksm_entries))
747 		num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) +
748 			  DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries);
749 	else
750 		num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries);
751 
752 	for (i = 0; i < num_wqe; i++) {
753 		len = (ksm_entries > max_ksm_entries) ? max_ksm_entries :
754 							ksm_entries;
755 		if (unlikely(index + len > shampo->hd_per_wq))
756 			len = shampo->hd_per_wq - index;
757 		err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
758 		if (unlikely(err))
759 			return err;
760 		index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
761 		ksm_entries -= len;
762 	}
763 
764 	return 0;
765 }
766 
mlx5e_alloc_rx_mpwqe(struct mlx5e_rq * rq,u16 ix)767 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
768 {
769 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
770 	struct mlx5e_icosq *sq = rq->icosq;
771 	struct mlx5e_frag_page *frag_page;
772 	struct mlx5_wq_cyc *wq = &sq->wq;
773 	struct mlx5e_umr_wqe *umr_wqe;
774 	u32 offset; /* 17-bit value with MTT. */
775 	u16 pi;
776 	int err;
777 	int i;
778 
779 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
780 		err = mlx5e_alloc_rx_hd_mpwqe(rq);
781 		if (unlikely(err))
782 			goto err;
783 	}
784 
785 	pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
786 	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
787 	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
788 
789 	frag_page = &wi->alloc_units.frag_pages[0];
790 
791 	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
792 		dma_addr_t addr;
793 
794 		err = mlx5e_page_alloc_fragmented(rq, frag_page);
795 		if (unlikely(err))
796 			goto err_unmap;
797 		addr = page_pool_get_dma_addr(frag_page->page);
798 		umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
799 			.ptag = cpu_to_be64(addr | MLX5_EN_WR),
800 		};
801 	}
802 
803 	/* Pad if needed, in case the value set to ucseg->xlt_octowords
804 	 * in mlx5e_build_umr_wqe() needed alignment.
805 	 */
806 	if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
807 		int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
808 			rq->mpwqe.pages_per_wqe;
809 
810 		memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
811 		       sizeof(*umr_wqe->inline_mtts) * pad);
812 	}
813 
814 	bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
815 	wi->consumed_strides = 0;
816 
817 	umr_wqe->ctrl.opmod_idx_opcode =
818 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
819 			    MLX5_OPCODE_UMR);
820 
821 	offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
822 	umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
823 
824 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
825 		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
826 		.num_wqebbs = rq->mpwqe.umr_wqebbs,
827 		.umr.rq     = rq,
828 	};
829 
830 	sq->pc += rq->mpwqe.umr_wqebbs;
831 
832 	sq->doorbell_cseg = &umr_wqe->ctrl;
833 
834 	return 0;
835 
836 err_unmap:
837 	while (--i >= 0) {
838 		frag_page--;
839 		mlx5e_page_release_fragmented(rq, frag_page);
840 	}
841 
842 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
843 
844 err:
845 	rq->stats->buff_alloc_err++;
846 
847 	return err;
848 }
849 
850 static void
mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq * rq,u16 header_index)851 mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
852 {
853 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
854 	u64 addr = shampo->info[header_index].addr;
855 
856 	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
857 		struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
858 
859 		dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
860 		mlx5e_page_release_fragmented(rq, dma_info->frag_page);
861 	}
862 	clear_bit(header_index, shampo->bitmap);
863 }
864 
mlx5e_shampo_dealloc_hd(struct mlx5e_rq * rq)865 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq)
866 {
867 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
868 	int i;
869 
870 	for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq)
871 		mlx5e_free_rx_shampo_hd_entry(rq, i);
872 }
873 
mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq * rq,u16 ix)874 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
875 {
876 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
877 	/* This function is called on rq/netdev close. */
878 	mlx5e_free_rx_mpwqe(rq, wi);
879 
880 	/* Avoid a second release of the wqe pages: dealloc is called also
881 	 * for missing wqes on an already flushed RQ.
882 	 */
883 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
884 }
885 
mlx5e_post_rx_wqes(struct mlx5e_rq * rq)886 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
887 {
888 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
889 	int wqe_bulk, count;
890 	bool busy = false;
891 	u16 head;
892 
893 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
894 		return false;
895 
896 	if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
897 		return false;
898 
899 	if (rq->page_pool)
900 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
901 
902 	wqe_bulk = mlx5_wq_cyc_missing(wq);
903 	head = mlx5_wq_cyc_get_head(wq);
904 
905 	/* Don't allow any newly allocated WQEs to share the same page with old
906 	 * WQEs that aren't completed yet. Stop earlier.
907 	 */
908 	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
909 
910 	if (!rq->xsk_pool) {
911 		count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
912 	} else if (likely(!dma_dev_need_sync(rq->pdev))) {
913 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
914 		count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
915 	} else {
916 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
917 		/* If dma_need_sync is true, it's more efficient to call
918 		 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
919 		 * because the latter does the same check and returns only one
920 		 * frame.
921 		 */
922 		count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
923 	}
924 
925 	mlx5_wq_cyc_push_n(wq, count);
926 	if (unlikely(count != wqe_bulk)) {
927 		rq->stats->buff_alloc_err++;
928 		busy = true;
929 	}
930 
931 	/* ensure wqes are visible to device before updating doorbell record */
932 	dma_wmb();
933 
934 	mlx5_wq_cyc_update_db_record(wq);
935 
936 	return busy;
937 }
938 
mlx5e_free_icosq_descs(struct mlx5e_icosq * sq)939 void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
940 {
941 	u16 sqcc;
942 
943 	sqcc = sq->cc;
944 
945 	while (sqcc != sq->pc) {
946 		struct mlx5e_icosq_wqe_info *wi;
947 		u16 ci;
948 
949 		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
950 		wi = &sq->db.wqe_info[ci];
951 		sqcc += wi->num_wqebbs;
952 #ifdef CONFIG_MLX5_EN_TLS
953 		switch (wi->wqe_type) {
954 		case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
955 			mlx5e_ktls_handle_ctx_completion(wi);
956 			break;
957 		case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
958 			mlx5e_ktls_handle_get_psv_completion(wi, sq);
959 			break;
960 		}
961 #endif
962 	}
963 	sq->cc = sqcc;
964 }
965 
mlx5e_shampo_fill_umr(struct mlx5e_rq * rq,int len)966 void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len)
967 {
968 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
969 	int end, from, full_len = len;
970 
971 	end = shampo->hd_per_wq;
972 	from = shampo->ci;
973 	if (from + len > end) {
974 		len -= end - from;
975 		bitmap_set(shampo->bitmap, from, end - from);
976 		from = 0;
977 	}
978 
979 	bitmap_set(shampo->bitmap, from, len);
980 	shampo->ci = (shampo->ci + full_len) & (shampo->hd_per_wq - 1);
981 }
982 
mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,struct mlx5e_icosq * sq)983 static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
984 				       struct mlx5e_icosq *sq)
985 {
986 	struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
987 	/* assume 1:1 relationship between RQ and icosq */
988 	struct mlx5e_rq *rq = &c->rq;
989 
990 	mlx5e_shampo_fill_umr(rq, umr.len);
991 }
992 
mlx5e_poll_ico_cq(struct mlx5e_cq * cq)993 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
994 {
995 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
996 	struct mlx5_cqe64 *cqe;
997 	u16 sqcc;
998 	int i;
999 
1000 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
1001 		return 0;
1002 
1003 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
1004 	if (likely(!cqe))
1005 		return 0;
1006 
1007 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
1008 	 * otherwise a cq overrun may occur
1009 	 */
1010 	sqcc = sq->cc;
1011 
1012 	i = 0;
1013 	do {
1014 		u16 wqe_counter;
1015 		bool last_wqe;
1016 
1017 		mlx5_cqwq_pop(&cq->wq);
1018 
1019 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
1020 
1021 		do {
1022 			struct mlx5e_icosq_wqe_info *wi;
1023 			u16 ci;
1024 
1025 			last_wqe = (sqcc == wqe_counter);
1026 
1027 			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
1028 			wi = &sq->db.wqe_info[ci];
1029 			sqcc += wi->num_wqebbs;
1030 
1031 			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
1032 				netdev_WARN_ONCE(cq->netdev,
1033 						 "Bad OP in ICOSQ CQE: 0x%x\n",
1034 						 get_cqe_opcode(cqe));
1035 				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
1036 						     (struct mlx5_err_cqe *)cqe);
1037 				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
1038 				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
1039 					queue_work(cq->workqueue, &sq->recover_work);
1040 				break;
1041 			}
1042 
1043 			switch (wi->wqe_type) {
1044 			case MLX5E_ICOSQ_WQE_UMR_RX:
1045 				wi->umr.rq->mpwqe.umr_completed++;
1046 				break;
1047 			case MLX5E_ICOSQ_WQE_NOP:
1048 				break;
1049 			case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
1050 				mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
1051 				break;
1052 #ifdef CONFIG_MLX5_EN_TLS
1053 			case MLX5E_ICOSQ_WQE_UMR_TLS:
1054 				break;
1055 			case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
1056 				mlx5e_ktls_handle_ctx_completion(wi);
1057 				break;
1058 			case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
1059 				mlx5e_ktls_handle_get_psv_completion(wi, sq);
1060 				break;
1061 #endif
1062 			default:
1063 				netdev_WARN_ONCE(cq->netdev,
1064 						 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
1065 						 wi->wqe_type);
1066 			}
1067 		} while (!last_wqe);
1068 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
1069 
1070 	sq->cc = sqcc;
1071 
1072 	mlx5_cqwq_update_db_record(&cq->wq);
1073 
1074 	return i;
1075 }
1076 
mlx5e_post_rx_mpwqes(struct mlx5e_rq * rq)1077 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
1078 {
1079 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
1080 	u8  umr_completed = rq->mpwqe.umr_completed;
1081 	struct mlx5e_icosq *sq = rq->icosq;
1082 	int alloc_err = 0;
1083 	u8  missing, i;
1084 	u16 head;
1085 
1086 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1087 		return false;
1088 
1089 	if (umr_completed) {
1090 		mlx5e_post_rx_mpwqe(rq, umr_completed);
1091 		rq->mpwqe.umr_in_progress -= umr_completed;
1092 		rq->mpwqe.umr_completed = 0;
1093 	}
1094 
1095 	missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
1096 
1097 	if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
1098 		rq->stats->congst_umr++;
1099 
1100 	if (likely(missing < rq->mpwqe.min_wqe_bulk))
1101 		return false;
1102 
1103 	if (rq->page_pool)
1104 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
1105 
1106 	head = rq->mpwqe.actual_wq_head;
1107 	i = missing;
1108 	do {
1109 		struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
1110 
1111 		/* Deferred free for better page pool cache usage. */
1112 		mlx5e_free_rx_mpwqe(rq, wi);
1113 
1114 		alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
1115 					   mlx5e_alloc_rx_mpwqe(rq, head);
1116 
1117 		if (unlikely(alloc_err))
1118 			break;
1119 		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
1120 	} while (--i);
1121 
1122 	rq->mpwqe.umr_last_bulk    = missing - i;
1123 	if (sq->doorbell_cseg) {
1124 		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
1125 		sq->doorbell_cseg = NULL;
1126 	}
1127 
1128 	rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
1129 	rq->mpwqe.actual_wq_head   = head;
1130 
1131 	/* If XSK Fill Ring doesn't have enough frames, report the error, so
1132 	 * that one of the actions can be performed:
1133 	 * 1. If need_wakeup is used, signal that the application has to kick
1134 	 * the driver when it refills the Fill Ring.
1135 	 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
1136 	 */
1137 	if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
1138 		return true;
1139 
1140 	return false;
1141 }
1142 
mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 * cqe,struct tcphdr * tcp)1143 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
1144 {
1145 	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
1146 	u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
1147 			 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
1148 
1149 	tcp->check                      = 0;
1150 	tcp->psh                        = get_cqe_lro_tcppsh(cqe);
1151 
1152 	if (tcp_ack) {
1153 		tcp->ack                = 1;
1154 		tcp->ack_seq            = cqe->lro.ack_seq_num;
1155 		tcp->window             = cqe->lro.tcp_win;
1156 	}
1157 }
1158 
mlx5e_lro_update_hdr(struct sk_buff * skb,struct mlx5_cqe64 * cqe,u32 cqe_bcnt)1159 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
1160 				 u32 cqe_bcnt)
1161 {
1162 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
1163 	struct tcphdr	*tcp;
1164 	int network_depth = 0;
1165 	__wsum check;
1166 	__be16 proto;
1167 	u16 tot_len;
1168 	void *ip_p;
1169 
1170 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
1171 
1172 	tot_len = cqe_bcnt - network_depth;
1173 	ip_p = skb->data + network_depth;
1174 
1175 	if (proto == htons(ETH_P_IP)) {
1176 		struct iphdr *ipv4 = ip_p;
1177 
1178 		tcp = ip_p + sizeof(struct iphdr);
1179 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1180 
1181 		ipv4->ttl               = cqe->lro.min_ttl;
1182 		ipv4->tot_len           = cpu_to_be16(tot_len);
1183 		ipv4->check             = 0;
1184 		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
1185 						       ipv4->ihl);
1186 
1187 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1188 		check = csum_partial(tcp, tcp->doff * 4,
1189 				     csum_unfold((__force __sum16)cqe->check_sum));
1190 		/* Almost done, don't forget the pseudo header */
1191 		tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
1192 					  ipv4->saddr, ipv4->daddr, check);
1193 	} else {
1194 		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
1195 		struct ipv6hdr *ipv6 = ip_p;
1196 
1197 		tcp = ip_p + sizeof(struct ipv6hdr);
1198 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1199 
1200 		ipv6->hop_limit         = cqe->lro.min_ttl;
1201 		ipv6->payload_len       = cpu_to_be16(payload_len);
1202 
1203 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1204 		check = csum_partial(tcp, tcp->doff * 4,
1205 				     csum_unfold((__force __sum16)cqe->check_sum));
1206 		/* Almost done, don't forget the pseudo header */
1207 		tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
1208 					  &ipv6->daddr, check);
1209 	}
1210 }
1211 
mlx5e_shampo_get_packet_hd(struct mlx5e_rq * rq,u16 header_index)1212 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
1213 {
1214 	struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
1215 	u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
1216 
1217 	return page_address(last_head->frag_page->page) + head_offset;
1218 }
1219 
mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq * rq,struct iphdr * ipv4)1220 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
1221 {
1222 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1223 	struct sk_buff *skb = rq->hw_gro_data->skb;
1224 	struct udphdr *uh;
1225 
1226 	uh = (struct udphdr *)(skb->data + udp_off);
1227 	uh->len = htons(skb->len - udp_off);
1228 
1229 	if (uh->check)
1230 		uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
1231 					  ipv4->daddr, 0);
1232 
1233 	skb->csum_start = (unsigned char *)uh - skb->head;
1234 	skb->csum_offset = offsetof(struct udphdr, check);
1235 
1236 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1237 }
1238 
mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq * rq,struct ipv6hdr * ipv6)1239 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
1240 {
1241 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1242 	struct sk_buff *skb = rq->hw_gro_data->skb;
1243 	struct udphdr *uh;
1244 
1245 	uh = (struct udphdr *)(skb->data + udp_off);
1246 	uh->len = htons(skb->len - udp_off);
1247 
1248 	if (uh->check)
1249 		uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
1250 					  &ipv6->daddr, 0);
1251 
1252 	skb->csum_start = (unsigned char *)uh - skb->head;
1253 	skb->csum_offset = offsetof(struct udphdr, check);
1254 
1255 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1256 }
1257 
mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe,struct tcphdr * skb_tcp_hd)1258 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1259 					      struct tcphdr *skb_tcp_hd)
1260 {
1261 	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
1262 	struct tcphdr *last_tcp_hd;
1263 	void *last_hd_addr;
1264 
1265 	last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
1266 	last_tcp_hd =  last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
1267 	tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
1268 }
1269 
mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq * rq,struct iphdr * ipv4,struct mlx5_cqe64 * cqe,bool match)1270 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
1271 					     struct mlx5_cqe64 *cqe, bool match)
1272 {
1273 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1274 	struct sk_buff *skb = rq->hw_gro_data->skb;
1275 	struct tcphdr *tcp;
1276 
1277 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1278 	if (match)
1279 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1280 
1281 	tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
1282 				   ipv4->daddr, 0);
1283 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
1284 	if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
1285 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
1286 
1287 	skb->csum_start = (unsigned char *)tcp - skb->head;
1288 	skb->csum_offset = offsetof(struct tcphdr, check);
1289 
1290 	if (tcp->cwr)
1291 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1292 }
1293 
mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq * rq,struct ipv6hdr * ipv6,struct mlx5_cqe64 * cqe,bool match)1294 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
1295 					     struct mlx5_cqe64 *cqe, bool match)
1296 {
1297 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1298 	struct sk_buff *skb = rq->hw_gro_data->skb;
1299 	struct tcphdr *tcp;
1300 
1301 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1302 	if (match)
1303 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1304 
1305 	tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
1306 				   &ipv6->daddr, 0);
1307 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
1308 	skb->csum_start = (unsigned char *)tcp - skb->head;
1309 	skb->csum_offset = offsetof(struct tcphdr, check);
1310 
1311 	if (tcp->cwr)
1312 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1313 }
1314 
mlx5e_shampo_update_hdr(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe,bool match)1315 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
1316 {
1317 	bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
1318 	struct sk_buff *skb = rq->hw_gro_data->skb;
1319 
1320 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
1321 	skb->ip_summed = CHECKSUM_PARTIAL;
1322 
1323 	if (is_ipv4) {
1324 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
1325 		struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
1326 		__be16 newlen = htons(skb->len - nhoff);
1327 
1328 		csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
1329 		ipv4->tot_len = newlen;
1330 
1331 		if (ipv4->protocol == IPPROTO_TCP)
1332 			mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
1333 		else
1334 			mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
1335 	} else {
1336 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
1337 		struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);
1338 
1339 		ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));
1340 
1341 		if (ipv6->nexthdr == IPPROTO_TCP)
1342 			mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
1343 		else
1344 			mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
1345 	}
1346 }
1347 
mlx5e_skb_set_hash(struct mlx5_cqe64 * cqe,struct sk_buff * skb)1348 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
1349 				      struct sk_buff *skb)
1350 {
1351 	u8 cht = cqe->rss_hash_type;
1352 	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
1353 		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
1354 					    PKT_HASH_TYPE_NONE;
1355 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
1356 }
1357 
is_last_ethertype_ip(struct sk_buff * skb,int * network_depth,__be16 * proto)1358 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
1359 					__be16 *proto)
1360 {
1361 	*proto = ((struct ethhdr *)skb->data)->h_proto;
1362 	*proto = __vlan_get_protocol(skb, *proto, network_depth);
1363 
1364 	if (*proto == htons(ETH_P_IP))
1365 		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
1366 
1367 	if (*proto == htons(ETH_P_IPV6))
1368 		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
1369 
1370 	return false;
1371 }
1372 
mlx5e_enable_ecn(struct mlx5e_rq * rq,struct sk_buff * skb)1373 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
1374 {
1375 	int network_depth = 0;
1376 	__be16 proto;
1377 	void *ip;
1378 	int rc;
1379 
1380 	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
1381 		return;
1382 
1383 	ip = skb->data + network_depth;
1384 	rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
1385 					 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
1386 
1387 	rq->stats->ecn_mark += !!rc;
1388 }
1389 
get_ip_proto(struct sk_buff * skb,int network_depth,__be16 proto)1390 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
1391 {
1392 	void *ip_p = skb->data + network_depth;
1393 
1394 	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
1395 					    ((struct ipv6hdr *)ip_p)->nexthdr;
1396 }
1397 
1398 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1399 
1400 #define MAX_PADDING 8
1401 
1402 static void
tail_padding_csum_slow(struct sk_buff * skb,int offset,int len,struct mlx5e_rq_stats * stats)1403 tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
1404 		       struct mlx5e_rq_stats *stats)
1405 {
1406 	stats->csum_complete_tail_slow++;
1407 	skb->csum = csum_block_add(skb->csum,
1408 				   skb_checksum(skb, offset, len, 0),
1409 				   offset);
1410 }
1411 
1412 static void
tail_padding_csum(struct sk_buff * skb,int offset,struct mlx5e_rq_stats * stats)1413 tail_padding_csum(struct sk_buff *skb, int offset,
1414 		  struct mlx5e_rq_stats *stats)
1415 {
1416 	u8 tail_padding[MAX_PADDING];
1417 	int len = skb->len - offset;
1418 	void *tail;
1419 
1420 	if (unlikely(len > MAX_PADDING)) {
1421 		tail_padding_csum_slow(skb, offset, len, stats);
1422 		return;
1423 	}
1424 
1425 	tail = skb_header_pointer(skb, offset, len, tail_padding);
1426 	if (unlikely(!tail)) {
1427 		tail_padding_csum_slow(skb, offset, len, stats);
1428 		return;
1429 	}
1430 
1431 	stats->csum_complete_tail++;
1432 	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
1433 }
1434 
1435 static void
mlx5e_skb_csum_fixup(struct sk_buff * skb,int network_depth,__be16 proto,struct mlx5e_rq_stats * stats)1436 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
1437 		     struct mlx5e_rq_stats *stats)
1438 {
1439 	struct ipv6hdr *ip6;
1440 	struct iphdr   *ip4;
1441 	int pkt_len;
1442 
1443 	/* Fixup vlan headers, if any */
1444 	if (network_depth > ETH_HLEN)
1445 		/* CQE csum is calculated from the IP header and does
1446 		 * not cover VLAN headers (if present). This will add
1447 		 * the checksum manually.
1448 		 */
1449 		skb->csum = csum_partial(skb->data + ETH_HLEN,
1450 					 network_depth - ETH_HLEN,
1451 					 skb->csum);
1452 
1453 	/* Fixup tail padding, if any */
1454 	switch (proto) {
1455 	case htons(ETH_P_IP):
1456 		ip4 = (struct iphdr *)(skb->data + network_depth);
1457 		pkt_len = network_depth + ntohs(ip4->tot_len);
1458 		break;
1459 	case htons(ETH_P_IPV6):
1460 		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
1461 		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
1462 		break;
1463 	default:
1464 		return;
1465 	}
1466 
1467 	if (likely(pkt_len >= skb->len))
1468 		return;
1469 
1470 	tail_padding_csum(skb, pkt_len, stats);
1471 }
1472 
mlx5e_handle_csum(struct net_device * netdev,struct mlx5_cqe64 * cqe,struct mlx5e_rq * rq,struct sk_buff * skb,bool lro)1473 static inline void mlx5e_handle_csum(struct net_device *netdev,
1474 				     struct mlx5_cqe64 *cqe,
1475 				     struct mlx5e_rq *rq,
1476 				     struct sk_buff *skb,
1477 				     bool   lro)
1478 {
1479 	struct mlx5e_rq_stats *stats = rq->stats;
1480 	int network_depth = 0;
1481 	__be16 proto;
1482 
1483 	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
1484 		goto csum_none;
1485 
1486 	if (lro) {
1487 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1488 		stats->csum_unnecessary++;
1489 		return;
1490 	}
1491 
1492 	/* True when explicitly set via priv flag, or XDP prog is loaded */
1493 	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
1494 	    get_cqe_tls_offload(cqe))
1495 		goto csum_unnecessary;
1496 
1497 	/* CQE csum doesn't cover padding octets in short ethernet
1498 	 * frames. And the pad field is appended prior to calculating
1499 	 * and appending the FCS field.
1500 	 *
1501 	 * Detecting these padded frames requires to verify and parse
1502 	 * IP headers, so we simply force all those small frames to be
1503 	 * CHECKSUM_UNNECESSARY even if they are not padded.
1504 	 */
1505 	if (short_frame(skb->len))
1506 		goto csum_unnecessary;
1507 
1508 	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1509 		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
1510 			goto csum_unnecessary;
1511 
1512 		stats->csum_complete++;
1513 		skb->ip_summed = CHECKSUM_COMPLETE;
1514 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1515 
1516 		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1517 			return; /* CQE csum covers all received bytes */
1518 
1519 		/* csum might need some fixups ...*/
1520 		mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1521 		return;
1522 	}
1523 
1524 csum_unnecessary:
1525 	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1526 		   (cqe->hds_ip_ext & CQE_L4_OK))) {
1527 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1528 		if (cqe_is_tunneled(cqe)) {
1529 			skb->csum_level = 1;
1530 			skb->encapsulation = 1;
1531 			stats->csum_unnecessary_inner++;
1532 			return;
1533 		}
1534 		stats->csum_unnecessary++;
1535 		return;
1536 	}
1537 csum_none:
1538 	skb->ip_summed = CHECKSUM_NONE;
1539 	stats->csum_none++;
1540 }
1541 
1542 #define MLX5E_CE_BIT_MASK 0x80
1543 
mlx5e_build_rx_skb(struct mlx5_cqe64 * cqe,u32 cqe_bcnt,struct mlx5e_rq * rq,struct sk_buff * skb)1544 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1545 				      u32 cqe_bcnt,
1546 				      struct mlx5e_rq *rq,
1547 				      struct sk_buff *skb)
1548 {
1549 	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
1550 	struct mlx5e_rq_stats *stats = rq->stats;
1551 	struct net_device *netdev = rq->netdev;
1552 
1553 	skb->mac_len = ETH_HLEN;
1554 
1555 	if (unlikely(get_cqe_tls_offload(cqe)))
1556 		mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1557 
1558 	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1559 		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
1560 						  be32_to_cpu(cqe->ft_metadata));
1561 
1562 	if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
1563 		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
1564 
1565 	if (lro_num_seg > 1) {
1566 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1567 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
1568 		/* Subtract one since we already counted this as one
1569 		 * "regular" packet in mlx5e_complete_rx_cqe()
1570 		 */
1571 		stats->packets += lro_num_seg - 1;
1572 		stats->lro_packets++;
1573 		stats->lro_bytes += cqe_bcnt;
1574 	}
1575 
1576 	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1577 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1578 								  rq->clock, get_cqe_ts(cqe));
1579 	skb_record_rx_queue(skb, rq->ix);
1580 
1581 	if (likely(netdev->features & NETIF_F_RXHASH))
1582 		mlx5e_skb_set_hash(cqe, skb);
1583 
1584 	if (cqe_has_vlan(cqe)) {
1585 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1586 				       be16_to_cpu(cqe->vlan_info));
1587 		stats->removed_vlan_packets++;
1588 	}
1589 
1590 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1591 
1592 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1593 	/* checking CE bit in cqe - MSB in ml_path field */
1594 	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1595 		mlx5e_enable_ecn(rq, skb);
1596 
1597 	skb->protocol = eth_type_trans(skb, netdev);
1598 
1599 	if (unlikely(mlx5e_skb_is_multicast(skb)))
1600 		stats->mcast_packets++;
1601 }
1602 
1603 static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
1604 					 struct mlx5_cqe64 *cqe,
1605 					 u32 cqe_bcnt,
1606 					 struct sk_buff *skb)
1607 {
1608 	struct mlx5e_rq_stats *stats = rq->stats;
1609 
1610 	stats->packets++;
1611 	stats->bytes += cqe_bcnt;
1612 	if (NAPI_GRO_CB(skb)->count != 1)
1613 		return;
1614 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1615 	skb_reset_network_header(skb);
1616 	if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
1617 		napi_gro_receive(rq->cq.napi, skb);
1618 		rq->hw_gro_data->skb = NULL;
1619 	}
1620 }
1621 
1622 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1623 					 struct mlx5_cqe64 *cqe,
1624 					 u32 cqe_bcnt,
1625 					 struct sk_buff *skb)
1626 {
1627 	struct mlx5e_rq_stats *stats = rq->stats;
1628 
1629 	stats->packets++;
1630 	stats->bytes += cqe_bcnt;
1631 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1632 }
1633 
1634 static inline
1635 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1636 				       u32 frag_size, u16 headroom,
1637 				       u32 cqe_bcnt, u32 metasize)
1638 {
1639 	struct sk_buff *skb = napi_build_skb(va, frag_size);
1640 
1641 	if (unlikely(!skb)) {
1642 		rq->stats->buff_alloc_err++;
1643 		return NULL;
1644 	}
1645 
1646 	skb_reserve(skb, headroom);
1647 	skb_put(skb, cqe_bcnt);
1648 
1649 	if (metasize)
1650 		skb_metadata_set(skb, metasize);
1651 
1652 	return skb;
1653 }
1654 
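/* Initialize the driver's xdp_buff wrapper: set up the generic xdp_buff over
 * the received buffer and stash the CQE and RQ pointers so later XDP handling
 * can reach back into driver context (an assumption based on how mxbuf->cqe
 * and mxbuf->rq are consumed elsewhere in the driver).
 */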
1655 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1656 			     void *va, u16 headroom, u32 frame_sz, u32 len,
1657 			     struct mlx5e_xdp_buff *mxbuf)
1658 {
1659 	xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
1660 	xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
1661 	mxbuf->cqe = cqe;
1662 	mxbuf->rq = rq;
1663 }
1664 
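/* Legacy (cyclic) RQ, linear path: the whole packet fits in a single
 * fragment. Sync the page for CPU access, optionally let XDP inspect or
 * consume the packet, then build the skb in place around the existing buffer
 * and bump the fragment refcount so page-pool recycling keeps the page alive.
 */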
1665 static struct sk_buff *
1666 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1667 			  struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1668 {
1669 	struct mlx5e_frag_page *frag_page = wi->frag_page;
1670 	u16 rx_headroom = rq->buff.headroom;
1671 	struct bpf_prog *prog;
1672 	struct sk_buff *skb;
1673 	u32 metasize = 0;
1674 	void *va, *data;
1675 	dma_addr_t addr;
1676 	u32 frag_size;
1677 
1678 	va             = page_address(frag_page->page) + wi->offset;
1679 	data           = va + rx_headroom;
1680 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1681 
1682 	addr = page_pool_get_dma_addr(frag_page->page);
1683 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1684 				      frag_size, rq->buff.map_dir);
1685 	net_prefetch(data);
1686 
1687 	prog = rcu_dereference(rq->xdp_prog);
1688 	if (prog) {
1689 		struct mlx5e_xdp_buff mxbuf;
1690 
1691 		net_prefetchw(va); /* xdp_frame data area */
1692 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1693 				 cqe_bcnt, &mxbuf);
1694 		if (mlx5e_xdp_handle(rq, prog, &mxbuf))
1695 			return NULL; /* page/packet was consumed by XDP */
1696 
1697 		rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
1698 		metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
1699 		cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
1700 	}
1701 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1702 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
1703 	if (unlikely(!skb))
1704 		return NULL;
1705 
1706 	/* queue up for recycling/reuse */
1707 	skb_mark_for_recycle(skb);
1708 	frag_page->frags++;
1709 
1710 	return skb;
1711 }
1712 
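/* Legacy (cyclic) RQ, non-linear path: the packet spans several WQE
 * fragments. The first fragment becomes the linear part of an xdp_buff and
 * the remaining fragments are attached as shared-info frags, so a frags-aware
 * XDP program sees the full packet before the skb is built around
 * data_hard_start. Fragment refcounts are only taken once it is known the
 * pages stay with the skb, or were retained by XDP transmit/redirect, which
 * is what the XDP_XMIT flag appears to indicate.
 */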
1713 static struct sk_buff *
1714 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1715 			     struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1716 {
1717 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1718 	struct mlx5e_wqe_frag_info *head_wi = wi;
1719 	u16 rx_headroom = rq->buff.headroom;
1720 	struct mlx5e_frag_page *frag_page;
1721 	struct skb_shared_info *sinfo;
1722 	struct mlx5e_xdp_buff mxbuf;
1723 	u32 frag_consumed_bytes;
1724 	struct bpf_prog *prog;
1725 	struct sk_buff *skb;
1726 	dma_addr_t addr;
1727 	u32 truesize;
1728 	void *va;
1729 
1730 	frag_page = wi->frag_page;
1731 
1732 	va = page_address(frag_page->page) + wi->offset;
1733 	frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1734 
1735 	addr = page_pool_get_dma_addr(frag_page->page);
1736 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1737 				      rq->buff.frame0_sz, rq->buff.map_dir);
1738 	net_prefetchw(va); /* xdp_frame data area */
1739 	net_prefetch(va + rx_headroom);
1740 
1741 	mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1742 			 frag_consumed_bytes, &mxbuf);
1743 	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
1744 	truesize = 0;
1745 
1746 	cqe_bcnt -= frag_consumed_bytes;
1747 	frag_info++;
1748 	wi++;
1749 
1750 	while (cqe_bcnt) {
1751 		frag_page = wi->frag_page;
1752 
1753 		frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1754 
1755 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
1756 					       wi->offset, frag_consumed_bytes);
1757 		truesize += frag_info->frag_stride;
1758 
1759 		cqe_bcnt -= frag_consumed_bytes;
1760 		frag_info++;
1761 		wi++;
1762 	}
1763 
1764 	prog = rcu_dereference(rq->xdp_prog);
1765 	if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
1766 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1767 			struct mlx5e_wqe_frag_info *pwi;
1768 
1769 			for (pwi = head_wi; pwi < wi; pwi++)
1770 				pwi->frag_page->frags++;
1771 		}
1772 		return NULL; /* page/packet was consumed by XDP */
1773 	}
1774 
1775 	skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
1776 				     mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
1777 				     mxbuf.xdp.data_end - mxbuf.xdp.data,
1778 				     mxbuf.xdp.data - mxbuf.xdp.data_meta);
1779 	if (unlikely(!skb))
1780 		return NULL;
1781 
1782 	skb_mark_for_recycle(skb);
1783 	head_wi->frag_page->frags++;
1784 
1785 	if (xdp_buff_has_frags(&mxbuf.xdp)) {
1786 		/* sinfo->nr_frags is reset by build_skb, calculate again. */
1787 		xdp_update_skb_shared_info(skb, wi - head_wi - 1,
1788 					   sinfo->xdp_frags_size, truesize,
1789 					   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
1790 
1791 		for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
1792 			pwi->frag_page->frags++;
1793 	}
1794 
1795 	return skb;
1796 }
1797 
1798 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1799 {
1800 	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1801 	struct mlx5e_priv *priv = rq->priv;
1802 
1803 	if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1804 	    !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1805 		mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1806 		queue_work(priv->wq, &rq->recover_work);
1807 	}
1808 }
1809 
1810 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1811 {
1812 	trigger_report(rq, cqe);
1813 	rq->stats->wqe_err++;
1814 }
1815 
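/* Default NIC handler for one CQE on a cyclic RQ: locate the WQE fragment
 * from the CQE counter, build the skb through the configured skb_from_cqe
 * callback (linear, non-linear or XSK), complete it, let TC restore the
 * chain mapping if the CQE carries one, hand the skb to GRO and finally pop
 * the WQE so it can be reposted.
 */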
1816 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1817 {
1818 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1819 	struct mlx5e_wqe_frag_info *wi;
1820 	struct sk_buff *skb;
1821 	u32 cqe_bcnt;
1822 	u16 ci;
1823 
1824 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1825 	wi       = get_frag(rq, ci);
1826 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1827 
1828 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1829 		mlx5e_handle_rx_err_cqe(rq, cqe);
1830 		goto wq_cyc_pop;
1831 	}
1832 
1833 	skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
1834 			      mlx5e_skb_from_cqe_linear,
1835 			      mlx5e_skb_from_cqe_nonlinear,
1836 			      mlx5e_xsk_skb_from_cqe_linear,
1837 			      rq, wi, cqe, cqe_bcnt);
1838 	if (!skb) {
1839 		/* probably for XDP */
1840 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1841 			wi->frag_page->frags++;
1842 		goto wq_cyc_pop;
1843 	}
1844 
1845 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1846 
1847 	if (mlx5e_cqe_regb_chain(cqe))
1848 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
1849 			dev_kfree_skb_any(skb);
1850 			goto wq_cyc_pop;
1851 		}
1852 
1853 	napi_gro_receive(rq->cq.napi, skb);
1854 
1855 wq_cyc_pop:
1856 	mlx5_wq_cyc_pop(wq);
1857 }
1858 
1859 #ifdef CONFIG_MLX5_ESWITCH
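/* Representor (eswitch) RX handlers: same flow as the NIC handlers, except
 * that a VLAN tag is popped when the representor has a VLAN configured and
 * delivery goes through mlx5e_rep_tc_receive(), which presumably restores TC
 * chain / tunnel metadata for the representor datapath.
 */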
1860 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1861 {
1862 	struct net_device *netdev = rq->netdev;
1863 	struct mlx5e_priv *priv = netdev_priv(netdev);
1864 	struct mlx5e_rep_priv *rpriv  = priv->ppriv;
1865 	struct mlx5_eswitch_rep *rep = rpriv->rep;
1866 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1867 	struct mlx5e_wqe_frag_info *wi;
1868 	struct sk_buff *skb;
1869 	u32 cqe_bcnt;
1870 	u16 ci;
1871 
1872 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1873 	wi       = get_frag(rq, ci);
1874 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1875 
1876 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1877 		mlx5e_handle_rx_err_cqe(rq, cqe);
1878 		goto wq_cyc_pop;
1879 	}
1880 
1881 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1882 			      mlx5e_skb_from_cqe_linear,
1883 			      mlx5e_skb_from_cqe_nonlinear,
1884 			      rq, wi, cqe, cqe_bcnt);
1885 	if (!skb) {
1886 		/* probably for XDP */
1887 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1888 			wi->frag_page->frags++;
1889 		goto wq_cyc_pop;
1890 	}
1891 
1892 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1893 
1894 	if (rep->vlan && skb_vlan_tag_present(skb))
1895 		skb_vlan_pop(skb);
1896 
1897 	mlx5e_rep_tc_receive(cqe, rq, skb);
1898 
1899 wq_cyc_pop:
1900 	mlx5_wq_cyc_pop(wq);
1901 }
1902 
1903 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1904 {
1905 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1906 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1907 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
1908 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1909 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1910 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
1911 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
1912 	struct mlx5e_rx_wqe_ll *wqe;
1913 	struct mlx5_wq_ll *wq;
1914 	struct sk_buff *skb;
1915 	u16 cqe_bcnt;
1916 
1917 	wi->consumed_strides += cstrides;
1918 
1919 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1920 		mlx5e_handle_rx_err_cqe(rq, cqe);
1921 		goto mpwrq_cqe_out;
1922 	}
1923 
1924 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1925 		struct mlx5e_rq_stats *stats = rq->stats;
1926 
1927 		stats->mpwqe_filler_cqes++;
1928 		stats->mpwqe_filler_strides += cstrides;
1929 		goto mpwrq_cqe_out;
1930 	}
1931 
1932 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1933 
1934 	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1935 			      mlx5e_skb_from_cqe_mpwrq_linear,
1936 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
1937 			      rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
1938 	if (!skb)
1939 		goto mpwrq_cqe_out;
1940 
1941 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1942 
1943 	mlx5e_rep_tc_receive(cqe, rq, skb);
1944 
1945 mpwrq_cqe_out:
1946 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1947 		return;
1948 
1949 	wq  = &rq->mpwqe.wq;
1950 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1951 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1952 }
1953 
1954 const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1955 	.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
1956 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1957 };
1958 #endif
1959 
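/* Attach the data (payload) part of a SHAMPO packet to the skb as page
 * fragments, walking consecutive frag pages starting at data_offset until
 * data_bcnt bytes have been added.
 */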
1960 static void
1961 mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
1962 			   struct mlx5e_frag_page *frag_page,
1963 			   u32 data_bcnt, u32 data_offset)
1964 {
1965 	net_prefetchw(skb->data);
1966 
1967 	do {
1968 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
1969 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
1970 		unsigned int truesize = pg_consumed_bytes;
1971 
1972 		mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
1973 				   pg_consumed_bytes, truesize);
1974 
1975 		data_bcnt -= pg_consumed_bytes;
1976 		data_offset = 0;
1977 		frag_page++;
1978 	} while (data_bcnt);
1979 }
1980 
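/* Striding RQ, non-linear path. With an XDP program attached, a dedicated
 * "linear page" provides headroom and scratch space for
 * bpf_xdp_{load,store}_bytes while all payload is attached as frags; the skb
 * is then built around that linear page and the headers are pulled out of
 * the first frag afterwards. Without XDP, up to MLX5E_RX_MAX_HEAD bytes of
 * headers are copied into a freshly allocated skb head and the remaining
 * payload is attached as frags. Truesize is accounted per consumed page for
 * SHAMPO, otherwise rounded up to the stride size.
 */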
1981 static struct sk_buff *
1982 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1983 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
1984 				   u32 page_idx)
1985 {
1986 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
1987 	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1988 	struct mlx5e_frag_page *head_page = frag_page;
1989 	u32 frag_offset    = head_offset;
1990 	u32 byte_cnt       = cqe_bcnt;
1991 	struct skb_shared_info *sinfo;
1992 	struct mlx5e_xdp_buff mxbuf;
1993 	unsigned int truesize = 0;
1994 	struct bpf_prog *prog;
1995 	struct sk_buff *skb;
1996 	u32 linear_frame_sz;
1997 	u16 linear_data_len;
1998 	u16 linear_hr;
1999 	void *va;
2000 
2001 	prog = rcu_dereference(rq->xdp_prog);
2002 
2003 	if (prog) {
2004 		/* area for bpf_xdp_[store|load]_bytes */
2005 		net_prefetchw(page_address(frag_page->page) + frag_offset);
2006 		if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
2007 			rq->stats->buff_alloc_err++;
2008 			return NULL;
2009 		}
2010 		va = page_address(wi->linear_page.page);
2011 		net_prefetchw(va); /* xdp_frame data area */
2012 		linear_hr = XDP_PACKET_HEADROOM;
2013 		linear_data_len = 0;
2014 		linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
2015 	} else {
2016 		skb = napi_alloc_skb(rq->cq.napi,
2017 				     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
2018 		if (unlikely(!skb)) {
2019 			rq->stats->buff_alloc_err++;
2020 			return NULL;
2021 		}
2022 		skb_mark_for_recycle(skb);
2023 		va = skb->head;
2024 		net_prefetchw(va); /* xdp_frame data area */
2025 		net_prefetchw(skb->data);
2026 
2027 		frag_offset += headlen;
2028 		byte_cnt -= headlen;
2029 		linear_hr = skb_headroom(skb);
2030 		linear_data_len = headlen;
2031 		linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
2032 		if (unlikely(frag_offset >= PAGE_SIZE)) {
2033 			frag_page++;
2034 			frag_offset -= PAGE_SIZE;
2035 		}
2036 	}
2037 
2038 	mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
2039 
2040 	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
2041 
2042 	while (byte_cnt) {
2043 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
2044 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
2045 
2046 		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
2047 			truesize += pg_consumed_bytes;
2048 		else
2049 			truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
2050 
2051 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
2052 					       pg_consumed_bytes);
2053 		byte_cnt -= pg_consumed_bytes;
2054 		frag_offset = 0;
2055 		frag_page++;
2056 	}
2057 
2058 	if (prog) {
2059 		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
2060 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
2061 				struct mlx5e_frag_page *pfp;
2062 
2063 				for (pfp = head_page; pfp < frag_page; pfp++)
2064 					pfp->frags++;
2065 
2066 				wi->linear_page.frags++;
2067 			}
2068 			mlx5e_page_release_fragmented(rq, &wi->linear_page);
2069 			return NULL; /* page/packet was consumed by XDP */
2070 		}
2071 
2072 		skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
2073 					     linear_frame_sz,
2074 					     mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
2075 					     mxbuf.xdp.data - mxbuf.xdp.data_meta);
2076 		if (unlikely(!skb)) {
2077 			mlx5e_page_release_fragmented(rq, &wi->linear_page);
2078 			return NULL;
2079 		}
2080 
2081 		skb_mark_for_recycle(skb);
2082 		wi->linear_page.frags++;
2083 		mlx5e_page_release_fragmented(rq, &wi->linear_page);
2084 
2085 		if (xdp_buff_has_frags(&mxbuf.xdp)) {
2086 			struct mlx5e_frag_page *pagep;
2087 
2088 			/* sinfo->nr_frags is reset by build_skb, calculate again. */
2089 			xdp_update_skb_shared_info(skb, frag_page - head_page,
2090 						   sinfo->xdp_frags_size, truesize,
2091 						   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
2092 
2093 			pagep = head_page;
2094 			do
2095 				pagep->frags++;
2096 			while (++pagep < frag_page);
2097 		}
2098 		__pskb_pull_tail(skb, headlen);
2099 	} else {
2100 		dma_addr_t addr;
2101 
2102 		if (xdp_buff_has_frags(&mxbuf.xdp)) {
2103 			struct mlx5e_frag_page *pagep;
2104 
2105 			xdp_update_skb_shared_info(skb, sinfo->nr_frags,
2106 						   sinfo->xdp_frags_size, truesize,
2107 						   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
2108 
2109 			pagep = frag_page - sinfo->nr_frags;
2110 			do
2111 				pagep->frags++;
2112 			while (++pagep < frag_page);
2113 		}
2114 		/* copy header */
2115 		addr = page_pool_get_dma_addr(head_page->page);
2116 		mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
2117 				      head_offset, head_offset, headlen);
2118 		/* skb linear part was allocated with headlen and aligned to long */
2119 		skb->tail += headlen;
2120 		skb->len  += headlen;
2121 	}
2122 
2123 	return skb;
2124 }
2125 
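/* Striding RQ, linear path: the packet fits within a single stride, so the
 * skb is built directly over the receive buffer. Packets larger than the HW
 * MTU are dropped here since LRO never uses this path, and XDP may adjust
 * headroom, metadata and length before the skb is created.
 */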
2126 static struct sk_buff *
2127 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2128 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
2129 				u32 page_idx)
2130 {
2131 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
2132 	u16 rx_headroom = rq->buff.headroom;
2133 	struct bpf_prog *prog;
2134 	struct sk_buff *skb;
2135 	u32 metasize = 0;
2136 	void *va, *data;
2137 	dma_addr_t addr;
2138 	u32 frag_size;
2139 
2140 	/* Check packet size. Note LRO doesn't use linear SKB */
2141 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
2142 		rq->stats->oversize_pkts_sw_drop++;
2143 		return NULL;
2144 	}
2145 
2146 	va             = page_address(frag_page->page) + head_offset;
2147 	data           = va + rx_headroom;
2148 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2149 
2150 	addr = page_pool_get_dma_addr(frag_page->page);
2151 	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
2152 				      frag_size, rq->buff.map_dir);
2153 	net_prefetch(data);
2154 
2155 	prog = rcu_dereference(rq->xdp_prog);
2156 	if (prog) {
2157 		struct mlx5e_xdp_buff mxbuf;
2158 
2159 		net_prefetchw(va); /* xdp_frame data area */
2160 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
2161 				 cqe_bcnt, &mxbuf);
2162 		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
2163 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
2164 				frag_page->frags++;
2165 			return NULL; /* page/packet was consumed by XDP */
2166 		}
2167 
2168 		rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
2169 		metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
2170 		cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
2171 	}
2172 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2173 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
2174 	if (unlikely(!skb))
2175 		return NULL;
2176 
2177 	/* queue up for recycling/reuse */
2178 	skb_mark_for_recycle(skb);
2179 	frag_page->frags++;
2180 
2181 	return skb;
2182 }
2183 
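/* Build the header skb for a SHAMPO (HW GRO) packet. Small headers get an
 * skb built in place around the header buffer; headers larger than a header
 * entry are instead copied into a newly allocated skb, counted in the
 * gro_large_hds statistic.
 */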
2184 static struct sk_buff *
2185 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2186 			  struct mlx5_cqe64 *cqe, u16 header_index)
2187 {
2188 	struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
2189 	u16 head_offset = head->addr & (PAGE_SIZE - 1);
2190 	u16 head_size = cqe->shampo.header_size;
2191 	u16 rx_headroom = rq->buff.headroom;
2192 	struct sk_buff *skb = NULL;
2193 	void *hdr, *data;
2194 	u32 frag_size;
2195 
2196 	hdr		= page_address(head->frag_page->page) + head_offset;
2197 	data		= hdr + rx_headroom;
2198 	frag_size	= MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
2199 
2200 	if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
2201 		/* build SKB around header */
2202 		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
2203 		net_prefetchw(hdr);
2204 		net_prefetch(data);
2205 		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
2206 
2207 		if (unlikely(!skb))
2208 			return NULL;
2209 
2210 		head->frag_page->frags++;
2211 	} else {
2212 		/* allocate SKB and copy header for large header */
2213 		rq->stats->gro_large_hds++;
2214 		skb = napi_alloc_skb(rq->cq.napi,
2215 				     ALIGN(head_size, sizeof(long)));
2216 		if (unlikely(!skb)) {
2217 			rq->stats->buff_alloc_err++;
2218 			return NULL;
2219 		}
2220 
2221 		net_prefetchw(skb->data);
2222 		mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
2223 				      head_offset + rx_headroom,
2224 				      rx_headroom, head_size);
2225 		/* skb linear part was allocated with headlen and aligned to long */
2226 		skb->tail += head_size;
2227 		skb->len  += head_size;
2228 	}
2229 
2230 	/* queue up for recycling/reuse */
2231 	skb_mark_for_recycle(skb);
2232 
2233 	return skb;
2234 }
2235 
2236 static void
2237 mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
2238 {
2239 	skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
2240 	unsigned int frag_size = skb_frag_size(last_frag);
2241 	unsigned int frag_truesize;
2242 
2243 	frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
2244 	skb->truesize += frag_truesize - frag_size;
2245 }
2246 
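/* Finalize and deliver the in-progress HW-GRO skb: align the truesize of the
 * last fragment to the stride size, and when more than one segment was
 * merged update the GRO statistics and (presumably) the transport headers
 * via mlx5e_shampo_update_hdr() before handing the skb to GRO.
 */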
2247 static void
2248 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
2249 {
2250 	struct sk_buff *skb = rq->hw_gro_data->skb;
2251 	struct mlx5e_rq_stats *stats = rq->stats;
2252 	u16 gro_count = NAPI_GRO_CB(skb)->count;
2253 
2254 	if (likely(skb_shinfo(skb)->nr_frags))
2255 		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
2256 	if (gro_count > 1) {
2257 		stats->gro_skbs++;
2258 		stats->gro_packets += gro_count;
2259 		stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count;
2260 
2261 		mlx5e_shampo_update_hdr(rq, cqe, match);
2262 	} else {
2263 		skb_shinfo(skb)->gso_size = 0;
2264 	}
2265 	napi_gro_receive(rq->cq.napi, skb);
2266 	rq->hw_gro_data->skb = NULL;
2267 }
2268 
2269 static bool
2270 mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
2271 {
2272 	int nr_frags = skb_shinfo(skb)->nr_frags;
2273 
2274 	return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2275 }
2276 
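/* SHAMPO (HW GRO) CQE handler. If the session no longer matches or the
 * aggregated skb cannot grow, the current skb is flushed first. A new skb is
 * then started from the split-off header (or from the non-linear data path
 * when the HW did not split headers); otherwise the payload of this CQE is
 * appended to the existing skb and the GRO segment count is bumped. The skb
 * is flushed when the CQE requests it, and the WQE is popped once all of its
 * strides have been consumed.
 */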
2277 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2278 {
2279 	u16 data_bcnt		= mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
2280 	u16 header_index	= mlx5e_shampo_get_cqe_header_index(rq, cqe);
2281 	u32 wqe_offset		= be32_to_cpu(cqe->shampo.data_offset);
2282 	u16 cstrides		= mpwrq_get_cqe_consumed_strides(cqe);
2283 	u32 data_offset		= wqe_offset & (PAGE_SIZE - 1);
2284 	u32 cqe_bcnt		= mpwrq_get_cqe_byte_cnt(cqe);
2285 	u16 wqe_id		= be16_to_cpu(cqe->wqe_id);
2286 	u32 page_idx		= wqe_offset >> PAGE_SHIFT;
2287 	u16 head_size		= cqe->shampo.header_size;
2288 	struct sk_buff **skb	= &rq->hw_gro_data->skb;
2289 	bool flush		= cqe->shampo.flush;
2290 	bool match		= cqe->shampo.match;
2291 	struct mlx5e_rq_stats *stats = rq->stats;
2292 	struct mlx5e_rx_wqe_ll *wqe;
2293 	struct mlx5e_mpw_info *wi;
2294 	struct mlx5_wq_ll *wq;
2295 
2296 	wi = mlx5e_get_mpw_info(rq, wqe_id);
2297 	wi->consumed_strides += cstrides;
2298 
2299 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2300 		mlx5e_handle_rx_err_cqe(rq, cqe);
2301 		goto mpwrq_cqe_out;
2302 	}
2303 
2304 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2305 		stats->mpwqe_filler_cqes++;
2306 		stats->mpwqe_filler_strides += cstrides;
2307 		goto mpwrq_cqe_out;
2308 	}
2309 
2310 	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
2311 		match = false;
2312 		mlx5e_shampo_flush_skb(rq, cqe, match);
2313 	}
2314 
2315 	if (!*skb) {
2316 		if (likely(head_size))
2317 			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
2318 		else
2319 			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
2320 								  data_offset, page_idx);
2321 		if (unlikely(!*skb))
2322 			goto free_hd_entry;
2323 
2324 		NAPI_GRO_CB(*skb)->count = 1;
2325 		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
2326 	} else {
2327 		NAPI_GRO_CB(*skb)->count++;
2328 		if (NAPI_GRO_CB(*skb)->count == 2 &&
2329 		    rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
2330 			void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
2331 			int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
2332 				    sizeof(struct iphdr);
2333 			struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
2334 
2335 			rq->hw_gro_data->second_ip_id = ntohs(iph->id);
2336 		}
2337 	}
2338 
2339 	if (likely(head_size)) {
2340 		if (data_bcnt) {
2341 			struct mlx5e_frag_page *frag_page;
2342 
2343 			frag_page = &wi->alloc_units.frag_pages[page_idx];
2344 			mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
2345 		} else {
2346 			stats->hds_nodata_packets++;
2347 			stats->hds_nodata_bytes += head_size;
2348 		}
2349 	} else {
2350 		stats->hds_nosplit_packets++;
2351 		stats->hds_nosplit_bytes += data_bcnt;
2352 	}
2353 
2354 	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
2355 	if (flush && rq->hw_gro_data->skb)
2356 		mlx5e_shampo_flush_skb(rq, cqe, match);
2357 free_hd_entry:
2358 	if (likely(head_size))
2359 		mlx5e_free_rx_shampo_hd_entry(rq, header_index);
2360 mpwrq_cqe_out:
2361 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2362 		return;
2363 
2364 	if (unlikely(!cstrides))
2365 		return;
2366 
2367 	wq  = &rq->mpwqe.wq;
2368 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2369 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2370 }
2371 
2372 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2373 {
2374 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
2375 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
2376 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
2377 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
2378 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
2379 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
2380 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
2381 	struct mlx5e_rx_wqe_ll *wqe;
2382 	struct mlx5_wq_ll *wq;
2383 	struct sk_buff *skb;
2384 	u16 cqe_bcnt;
2385 
2386 	wi->consumed_strides += cstrides;
2387 
2388 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2389 		mlx5e_handle_rx_err_cqe(rq, cqe);
2390 		goto mpwrq_cqe_out;
2391 	}
2392 
2393 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2394 		struct mlx5e_rq_stats *stats = rq->stats;
2395 
2396 		stats->mpwqe_filler_cqes++;
2397 		stats->mpwqe_filler_strides += cstrides;
2398 		goto mpwrq_cqe_out;
2399 	}
2400 
2401 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
2402 
2403 	skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
2404 			      mlx5e_skb_from_cqe_mpwrq_linear,
2405 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
2406 			      mlx5e_xsk_skb_from_cqe_mpwrq_linear,
2407 			      rq, wi, cqe, cqe_bcnt, head_offset,
2408 			      page_idx);
2409 	if (!skb)
2410 		goto mpwrq_cqe_out;
2411 
2412 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2413 
2414 	if (mlx5e_cqe_regb_chain(cqe))
2415 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
2416 			dev_kfree_skb_any(skb);
2417 			goto mpwrq_cqe_out;
2418 		}
2419 
2420 	napi_gro_receive(rq->cq.napi, skb);
2421 
2422 mpwrq_cqe_out:
2423 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2424 		return;
2425 
2426 	wq  = &rq->mpwqe.wq;
2427 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2428 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2429 }
2430 
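/* Process CQEs when enhanced CQE compression is enabled: a regular CQE acts
 * as the "title" that the following compressed mini-CQE blocks refer to. The
 * title slot is read lazily, only when a compressed block actually follows
 * it, and a pending title is remembered across NAPI polls since its
 * mini-CQEs may only arrive in the next poll bulk.
 */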
2431 static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
2432 						 struct mlx5_cqwq *cqwq,
2433 						 int budget_rem)
2434 {
2435 	struct mlx5_cqe64 *cqe, *title_cqe = NULL;
2436 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
2437 	int work_done = 0;
2438 
2439 	cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
2440 	if (!cqe)
2441 		return work_done;
2442 
2443 	if (cqd->last_cqe_title &&
2444 	    (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
2445 		rq->stats->cqe_compress_blks++;
2446 		cqd->last_cqe_title = false;
2447 	}
2448 
2449 	do {
2450 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2451 			if (title_cqe) {
2452 				mlx5e_read_enhanced_title_slot(rq, title_cqe);
2453 				title_cqe = NULL;
2454 				rq->stats->cqe_compress_blks++;
2455 			}
2456 			work_done +=
2457 				mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
2458 							      budget_rem - work_done);
2459 			continue;
2460 		}
2461 		title_cqe = cqe;
2462 		mlx5_cqwq_pop(cqwq);
2463 
2464 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2465 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2466 				rq, cqe);
2467 		work_done++;
2468 	} while (work_done < budget_rem &&
2469 		 (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));
2470 
2471 	/* the last CQE might be the title CQE for the next poll bulk */
2472 	if (title_cqe) {
2473 		mlx5e_read_enhanced_title_slot(rq, title_cqe);
2474 		cqd->last_cqe_title = true;
2475 	}
2476 
2477 	return work_done;
2478 }
2479 
2480 static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
2481 					      struct mlx5_cqwq *cqwq,
2482 					      int budget_rem)
2483 {
2484 	struct mlx5_cqe64 *cqe;
2485 	int work_done = 0;
2486 
2487 	if (rq->cqd.left)
2488 		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
2489 
2490 	while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
2491 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2492 			work_done +=
2493 				mlx5e_decompress_cqes_start(rq, cqwq,
2494 							    budget_rem - work_done);
2495 			continue;
2496 		}
2497 
2498 		mlx5_cqwq_pop(cqwq);
2499 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2500 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2501 				rq, cqe);
2502 		work_done++;
2503 	}
2504 
2505 	return work_done;
2506 }
2507 
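/* NAPI RX poll for one CQ: dispatch to the basic or enhanced
 * CQE-compression processing loop, flush any pending HW-GRO skb, let XDP
 * complete its transmit/redirect bulk, and update the CQ doorbell record so
 * the HW can reuse the completed CQEs.
 */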
2508 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2509 {
2510 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
2511 	struct mlx5_cqwq *cqwq = &cq->wq;
2512 	int work_done;
2513 
2514 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
2515 		return 0;
2516 
2517 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
2518 		work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
2519 								  budget);
2520 	else
2521 		work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
2522 							       budget);
2523 
2524 	if (work_done == 0)
2525 		return 0;
2526 
2527 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
2528 		mlx5e_shampo_flush_skb(rq, NULL, false);
2529 
2530 	if (rcu_access_pointer(rq->xdp_prog))
2531 		mlx5e_xdp_rx_poll_complete(rq);
2532 
2533 	mlx5_cqwq_update_db_record(cqwq);
2534 
2535 	/* ensure cq space is freed before enabling more cqes */
2536 	wmb();
2537 
2538 	return work_done;
2539 }
2540 
2541 #ifdef CONFIG_MLX5_CORE_IPOIB
2542 
2543 #define MLX5_IB_GRH_SGID_OFFSET 8
2544 #define MLX5_IB_GRH_DGID_OFFSET 24
2545 #define MLX5_GID_SIZE           16
2546 
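/* IPoIB RX completion: map the destination QPN to the (possibly child) IPoIB
 * netdev, classify the packet from the GRH destination GID, drop multicast
 * frames this interface itself sent and the HCA replicated back, strip the
 * GRH, build the IPoIB pseudo header and account the packet on the
 * per-channel stats of the resolved device.
 */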
2547 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
2548 					 struct mlx5_cqe64 *cqe,
2549 					 u32 cqe_bcnt,
2550 					 struct sk_buff *skb)
2551 {
2552 	struct hwtstamp_config *tstamp;
2553 	struct mlx5e_rq_stats *stats;
2554 	struct net_device *netdev;
2555 	struct mlx5e_priv *priv;
2556 	char *pseudo_header;
2557 	u32 flags_rqpn;
2558 	u32 qpn;
2559 	u8 *dgid;
2560 	u8 g;
2561 
2562 	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
2563 	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
2564 
2565 	/* No mapping present, cannot process SKB. This might happen if a child
2566 	 * interface is going down while unprocessed CQEs remain on the parent RQ
2567 	 */
2568 	if (unlikely(!netdev)) {
2569 		/* TODO: add drop counters support */
2570 		skb->dev = NULL;
2571 		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
2572 		return;
2573 	}
2574 
2575 	priv = mlx5i_epriv(netdev);
2576 	tstamp = &priv->tstamp;
2577 	stats = &priv->channel_stats[rq->ix]->rq;
2578 
2579 	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
2580 	g = (flags_rqpn >> 28) & 3;
2581 	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
2582 	if ((!g) || dgid[0] != 0xff)
2583 		skb->pkt_type = PACKET_HOST;
2584 	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
2585 		skb->pkt_type = PACKET_BROADCAST;
2586 	else
2587 		skb->pkt_type = PACKET_MULTICAST;
2588 
2589 	/* Drop packets that this interface sent, i.e. multicast packets
2590 	 * that the HCA has replicated.
2591 	 */
2592 	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
2593 	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
2594 		    MLX5_GID_SIZE) == 0)) {
2595 		skb->dev = NULL;
2596 		return;
2597 	}
2598 
2599 	skb_pull(skb, MLX5_IB_GRH_BYTES);
2600 
2601 	skb->protocol = *((__be16 *)(skb->data));
2602 
2603 	if (netdev->features & NETIF_F_RXCSUM) {
2604 		skb->ip_summed = CHECKSUM_COMPLETE;
2605 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
2606 		stats->csum_complete++;
2607 	} else {
2608 		skb->ip_summed = CHECKSUM_NONE;
2609 		stats->csum_none++;
2610 	}
2611 
2612 	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
2613 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
2614 								  rq->clock, get_cqe_ts(cqe));
2615 	skb_record_rx_queue(skb, rq->ix);
2616 
2617 	if (likely(netdev->features & NETIF_F_RXHASH))
2618 		mlx5e_skb_set_hash(cqe, skb);
2619 
2620 	/* 20 bytes of IPoIB header plus 4 bytes of encapsulation header */
2621 	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
2622 	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
2623 	skb_reset_mac_header(skb);
2624 	skb_pull(skb, MLX5_IPOIB_HARD_LEN);
2625 
2626 	skb->dev = netdev;
2627 
2628 	stats->packets++;
2629 	stats->bytes += cqe_bcnt;
2630 }
2631 
2632 static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2633 {
2634 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2635 	struct mlx5e_wqe_frag_info *wi;
2636 	struct sk_buff *skb;
2637 	u32 cqe_bcnt;
2638 	u16 ci;
2639 
2640 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2641 	wi       = get_frag(rq, ci);
2642 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2643 
2644 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2645 		rq->stats->wqe_err++;
2646 		goto wq_cyc_pop;
2647 	}
2648 
2649 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
2650 			      mlx5e_skb_from_cqe_linear,
2651 			      mlx5e_skb_from_cqe_nonlinear,
2652 			      rq, wi, cqe, cqe_bcnt);
2653 	if (!skb)
2654 		goto wq_cyc_pop;
2655 
2656 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2657 	if (unlikely(!skb->dev)) {
2658 		dev_kfree_skb_any(skb);
2659 		goto wq_cyc_pop;
2660 	}
2661 	napi_gro_receive(rq->cq.napi, skb);
2662 
2663 wq_cyc_pop:
2664 	mlx5_wq_cyc_pop(wq);
2665 }
2666 
2667 const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
2668 	.handle_rx_cqe       = mlx5i_handle_rx_cqe,
2669 	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
2670 };
2671 #endif /* CONFIG_MLX5_CORE_IPOIB */
2672 
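/* Select the RX callbacks for an RQ based on its WQ type: for striding RQs
 * pick the linear, non-linear or XSK skb builder and the (SHAMPO) MPWQE CQE
 * handler from the profile; for cyclic RQs do the same with the legacy
 * handlers. Returns -EINVAL when the profile does not provide the required
 * handler.
 */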
2673 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
2674 {
2675 	struct net_device *netdev = rq->netdev;
2676 	struct mlx5_core_dev *mdev = rq->mdev;
2677 	struct mlx5e_priv *priv = rq->priv;
2678 
2679 	switch (rq->wq_type) {
2680 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2681 		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
2682 			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
2683 			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
2684 				mlx5e_skb_from_cqe_mpwrq_linear :
2685 				mlx5e_skb_from_cqe_mpwrq_nonlinear;
2686 		rq->post_wqes = mlx5e_post_rx_mpwqes;
2687 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
2688 
2689 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
2690 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
2691 			if (!rq->handle_rx_cqe) {
2692 				netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
2693 				return -EINVAL;
2694 			}
2695 		} else {
2696 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
2697 			if (!rq->handle_rx_cqe) {
2698 				netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
2699 				return -EINVAL;
2700 			}
2701 		}
2702 
2703 		break;
2704 	default: /* MLX5_WQ_TYPE_CYCLIC */
2705 		rq->wqe.skb_from_cqe = xsk ?
2706 			mlx5e_xsk_skb_from_cqe_linear :
2707 			mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
2708 				mlx5e_skb_from_cqe_linear :
2709 				mlx5e_skb_from_cqe_nonlinear;
2710 		rq->post_wqes = mlx5e_post_rx_wqes;
2711 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2712 		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
2713 		if (!rq->handle_rx_cqe) {
2714 			netdev_err(netdev, "RX handler of RQ is not set\n");
2715 			return -EINVAL;
2716 		}
2717 	}
2718 
2719 	return 0;
2720 }
2721 
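/* Devlink trap RX path: packets steered to the trap RQ are always built via
 * the non-linear skb path, reported to devlink with the trap id taken from
 * the CQE flow tag, and then freed - they are never passed up the stack.
 */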
2722 static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2723 {
2724 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2725 	struct mlx5e_wqe_frag_info *wi;
2726 	struct sk_buff *skb;
2727 	u32 cqe_bcnt;
2728 	u16 trap_id;
2729 	u16 ci;
2730 
2731 	trap_id  = get_cqe_flow_tag(cqe);
2732 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2733 	wi       = get_frag(rq, ci);
2734 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2735 
2736 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2737 		rq->stats->wqe_err++;
2738 		goto wq_cyc_pop;
2739 	}
2740 
2741 	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
2742 	if (!skb)
2743 		goto wq_cyc_pop;
2744 
2745 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2746 	skb_push(skb, ETH_HLEN);
2747 
2748 	mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
2749 				 rq->netdev->devlink_port);
2750 	dev_kfree_skb_any(skb);
2751 
2752 wq_cyc_pop:
2753 	mlx5_wq_cyc_pop(wq);
2754 }
2755 
2756 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
2757 {
2758 	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
2759 			       mlx5e_skb_from_cqe_linear :
2760 			       mlx5e_skb_from_cqe_nonlinear;
2761 	rq->post_wqes = mlx5e_post_rx_wqes;
2762 	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2763 	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
2764 }
2765