/*-
 * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <linux/list.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/mlx5_core/fs_tcp.h>

/*
 * The flow tables and their rules define the packet processing on
 * receive.  Currently the following structure is set up to handle
 * different offloads, such as TLS RX offload, VLAN decapsulation,
 * packet classification, RSS hashing and VxLAN checksum offloading:
 *
 *   +=========+       +=========+      +=================+
 *   |TCP/IPv4 |       |TCP/IPv4 |      |TCP/IPv4 Match   |
 *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
 *   |         |       |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *       +------------------------+
 *       V
 *   +=========+       +=========+      +=================+
 *   |TCP/IPv6 |       |TCP/IPv6 |      |TCP/IPv6 Match   |
 *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
 *   |         |       |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *       +------------------------+
 *       V
 *   +=========+       +=========+      +=================+
 *   |VLAN ft: |       |VxLAN    |      |VxLAN Main       |
 *   |CTAG/STAG|------>|      VNI|----->|Inner Proto Match|=====> Inner TIR n
 *   |VID/noVID|/      |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *                                |
 *                                |
 *                                v
 *                      +=================+
 *                      |Main             |
 *                      |Outer Proto Match|=====> TIR n
 *                      |                 |
 *                      +=================+
 *
 * The path through the flow rules directs each packet into an
 * appropriate TIR, according to:
 * - VLAN encapsulation
 * - Outer protocol
 * - Presence of an inner protocol
 */

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	u32	mpfs_index;
	struct mlx5e_eth_addr_info ai;
};

static void mlx5e_del_all_vlan_rules(struct mlx5e_priv *);

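/* Hash an Ethernet address by its least significant byte. */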
static inline int
mlx5e_hash_eth_addr(const u8 *addr)
{
	return (addr[5]);
}

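/*
 * Insert a new node into the address hash unless the address is
 * already present; in that case cancel any pending delete action,
 * free the new node and return false.
 */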
static bool
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    struct mlx5e_eth_addr_hash_node *hn_new)
{
	struct mlx5e_eth_addr_hash_node *hn;
	u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			free(hn_new, M_MLX5EN);
			return (false);
		}
	}
	LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
	return (true);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);

	/* ensure the rules are not freed again */
	ai->tt_vec = 0;
}

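/* Classify an Ethernet address as unicast or IPv4/IPv6/other multicast. */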
static int
mlx5e_get_eth_addr_type(const u8 *addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

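/*
 * Compute the bitmask of traffic types (TTs) for which flow rules
 * must be installed, based on the match type and address class.
 */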
static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

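/*
 * Install one flow rule per traffic type for the given address into
 * the main flow table, steering each match to its TIR.  On failure,
 * all rules added so far for this address are removed again.
 */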
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return (err);
}

static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

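/* Remove all rules from the main VxLAN (inner header) flow table. */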
static void
mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_rule **ra = priv->fts.main_vxlan_rule, **r;

	r = &ra[MLX5E_TT_IPV6_IPSEC_ESP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4_IPSEC_ESP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV6_IPSEC_AH];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4_IPSEC_AH];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV6_TCP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4_TCP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV6_UDP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4_UDP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV6];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_ANY];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}
}

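/*
 * Populate the main VxLAN flow table with rules matching the inner
 * headers of decapsulated packets, plus a final catch-all rule.
 */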
static int
mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
	u32 *tirn = priv->tirn_inner_vxlan;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	mc_enable = MLX5_MATCH_INNER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
	dest.tir_num = tirn[MLX5E_TT_IPV4];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
	dest.tir_num = tirn[MLX5E_TT_IPV6];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_UDP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_TCP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_AH);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_ESP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	mc_enable = 0;
	memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
	dest.tir_num = tirn[MLX5E_TT_ANY];
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_main_vxlan_rules(priv);

	return (err);
}

static int
mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (match_value == NULL || match_criteria == NULL) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_main_vxlan_rules_out;
	}
	err = mlx5e_add_main_vxlan_rules_sub(priv, match_criteria, match_value);

add_main_vxlan_rules_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		mlx5_en_err(ifp,
			    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return (-ENOMEM);

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return (err);
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

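/*
 * Add a single rule to the VLAN flow table, forwarding matching
 * packets to the VxLAN flow table for further classification.
 */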
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.vxlan.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_ETH_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
			priv->vlan.untagged_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->vlan.any_cvlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
			priv->vlan.any_cvlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->vlan.any_svlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
			priv->vlan.any_svlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->vlan.active_vlans_ft_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
			priv->vlan.active_vlans_ft_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}

static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
	if (err)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

	return (err);
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

static int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
					  i);
		if (err)
			goto error;
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto error;

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			goto error;
	}
	return (0);
error:
	mlx5e_del_all_vlan_rules(priv);
	return (err);
}

static void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

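/* Apply the pending add/delete action recorded in a hash node. */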
static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		if (hn->mpfs_index != -1U)
			mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

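/* Move the first node from one hash list to another, if any. */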
static struct mlx5e_eth_addr_hash_node *
mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL) {
		LIST_REMOVE(hn, hlist);
		LIST_INSERT_HEAD(uh, hn, hlist);
	}
	return (hn);
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL)
		LIST_REMOVE(hn, hlist);
	return (hn);
}

struct mlx5e_copy_addr_ctx {
	struct mlx5e_eth_addr_hash_head *free;
	struct mlx5e_eth_addr_hash_head *fill;
	bool success;
};

static u_int
mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct mlx5e_copy_addr_ctx *ctx = arg;
	struct mlx5e_eth_addr_hash_node *hn;

	hn = mlx5e_move_hn(ctx->free, ctx->fill);
	if (hn == NULL) {
		ctx->success = false;
		return (0);
	}
	ether_addr_copy(hn->ai.addr, LLADDR(sdl));

	return (1);
}

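/*
 * Snapshot the interface unicast and multicast addresses into the
 * driver hash lists.  Placeholder nodes are preallocated up front;
 * if the interface address lists grow while they are being copied,
 * the whole operation is retried.
 */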
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_copy_addr_ctx ctx;
	struct mlx5e_eth_addr_hash_head head_free;
	struct mlx5e_eth_addr_hash_head head_uc;
	struct mlx5e_eth_addr_hash_head head_mc;
	struct mlx5e_eth_addr_hash_node *hn;
	struct ifnet *ifp = priv->ifp;
	size_t x;
	size_t num;

	PRIV_ASSERT_LOCKED(priv);

retry:
	LIST_INIT(&head_free);
	LIST_INIT(&head_uc);
	LIST_INIT(&head_mc);
	num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);

	/* allocate place holders */
	for (x = 0; x != num; x++) {
		hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
		hn->action = MLX5E_ACTION_ADD;
		hn->mpfs_index = -1U;
		LIST_INSERT_HEAD(&head_free, hn, hlist);
	}

	hn = mlx5e_move_hn(&head_free, &head_uc);
	MPASS(hn != NULL);

	ether_addr_copy(hn->ai.addr,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	ctx.free = &head_free;
	ctx.fill = &head_uc;
	ctx.success = true;
	if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	ctx.fill = &head_mc;
	if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	/* insert L2 unicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
			continue;
		if (hn->mpfs_index == -1U)
			mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
			    hn->ai.addr, 0, 0);
	}

	/* insert L2 multicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
			continue;
	}

cleanup:
	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
		free(hn, M_MLX5EN);

	if (ctx.success == false)
		goto retry;
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct ifnet *ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		mlx5_en_err(priv->ifp,
			    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		mlx5_en_err(priv->ifp,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}

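/*
 * Mark all known addresses for deletion, re-add the ones still
 * present on the interface, then execute the resulting actions.
 */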
static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv, bool rx_mode_enable)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (rx_mode_enable)
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

static void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv, rx_mode_enable);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_set_rx_mode_core(priv, true);
	PRIV_UNLOCK(priv);
}

static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

#define MLX5E_NUM_MAIN_GROUPS	10
#define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)

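/*
 * Create the flow groups of the main table.  Each group carries a
 * distinct match criteria mask, the tunnel (VxLAN) group must come
 * first, and the group sizes add up to MLX5E_MAIN_TABLE_SIZE.
 */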
1381 static int
1382 mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1383 				      int inlen)
1384 {
1385 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1386 	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
1387 				match_criteria.outer_headers.dmac_47_16);
1388 	int err;
1389 	int ix = 0;
1390 
1391 	/* Tunnel rules need to be first in this list of groups */
1392 
1393 	/* Start tunnel rules */
1394 	memset(in, 0, inlen);
1395 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1396 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1397 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1398 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1399 	MLX5_SET_CFG(in, start_flow_index, ix);
1400 	ix += MLX5E_MAIN_GROUP0_SIZE;
1401 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1402 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1403 	if (IS_ERR(ft->g[ft->num_groups]))
1404 		goto err_destory_groups;
1405 	ft->num_groups++;
1406 	/* End Tunnel Rules */
1407 
1408 	memset(in, 0, inlen);
1409 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1410 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1411 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1412 	MLX5_SET_CFG(in, start_flow_index, ix);
1413 	ix += MLX5E_MAIN_GROUP1_SIZE;
1414 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1415 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1416 	if (IS_ERR(ft->g[ft->num_groups]))
1417 		goto err_destory_groups;
1418 	ft->num_groups++;
1419 
1420 	memset(in, 0, inlen);
1421 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1422 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1423 	MLX5_SET_CFG(in, start_flow_index, ix);
1424 	ix += MLX5E_MAIN_GROUP2_SIZE;
1425 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1426 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1427 	if (IS_ERR(ft->g[ft->num_groups]))
1428 		goto err_destory_groups;
1429 	ft->num_groups++;
1430 
1431 	memset(in, 0, inlen);
1432 	MLX5_SET_CFG(in, start_flow_index, ix);
1433 	ix += MLX5E_MAIN_GROUP3_SIZE;
1434 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1435 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1436 	if (IS_ERR(ft->g[ft->num_groups]))
1437 		goto err_destory_groups;
1438 	ft->num_groups++;
1439 
1440 	memset(in, 0, inlen);
1441 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1442 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1443 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1444 	memset(dmac, 0xff, ETH_ALEN);
1445 	MLX5_SET_CFG(in, start_flow_index, ix);
1446 	ix += MLX5E_MAIN_GROUP4_SIZE;
1447 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1448 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1449 	if (IS_ERR(ft->g[ft->num_groups]))
1450 		goto err_destory_groups;
1451 	ft->num_groups++;
1452 
1453 	memset(in, 0, inlen);
1454 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1455 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1456 	memset(dmac, 0xff, ETH_ALEN);
1457 	MLX5_SET_CFG(in, start_flow_index, ix);
1458 	ix += MLX5E_MAIN_GROUP5_SIZE;
1459 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1460 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1461 	if (IS_ERR(ft->g[ft->num_groups]))
1462 		goto err_destory_groups;
1463 	ft->num_groups++;
1464 
1465 	memset(in, 0, inlen);
1466 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1467 	memset(dmac, 0xff, ETH_ALEN);
1468 	MLX5_SET_CFG(in, start_flow_index, ix);
1469 	ix += MLX5E_MAIN_GROUP6_SIZE;
1470 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1471 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1472 	if (IS_ERR(ft->g[ft->num_groups]))
1473 		goto err_destory_groups;
1474 	ft->num_groups++;
1475 
1476 	memset(in, 0, inlen);
1477 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1478 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1479 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1480 	dmac[0] = 0x01;
1481 	MLX5_SET_CFG(in, start_flow_index, ix);
1482 	ix += MLX5E_MAIN_GROUP7_SIZE;
1483 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1484 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1485 	if (IS_ERR(ft->g[ft->num_groups]))
1486 		goto err_destory_groups;
1487 	ft->num_groups++;
1488 
1489 	memset(in, 0, inlen);
1490 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1491 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1492 	dmac[0] = 0x01;
1493 	MLX5_SET_CFG(in, start_flow_index, ix);
1494 	ix += MLX5E_MAIN_GROUP8_SIZE;
1495 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1496 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1497 	if (IS_ERR(ft->g[ft->num_groups]))
1498 		goto err_destory_groups;
1499 	ft->num_groups++;
1500 
1501 	memset(in, 0, inlen);
1502 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1503 	dmac[0] = 0x01;
1504 	MLX5_SET_CFG(in, start_flow_index, ix);
1505 	ix += MLX5E_MAIN_GROUP9_SIZE;
1506 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1507 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1508 	if (IS_ERR(ft->g[ft->num_groups]))
1509 		goto err_destory_groups;
1510 	ft->num_groups++;
1511 
1512 	return (0);
1513 
1514 err_destory_groups:
1515 	err = PTR_ERR(ft->g[ft->num_groups]);
1516 	ft->g[ft->num_groups] = NULL;
1517 	mlx5e_destroy_groups(ft);
1518 
1519 	return (err);
1520 }
1521 
1522 static int
1523 mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
1524 {
1525 	u32 *in;
1526 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1527 	int err;
1528 
1529 	in = mlx5_vzalloc(inlen);
1530 	if (!in)
1531 		return (-ENOMEM);
1532 
1533 	err = mlx5e_create_main_groups_sub(ft, in, inlen);
1534 
1535 	kvfree(in);
1536 	return (err);
1537 }
1538 
1539 #define MLX5E_MAIN_VXLAN_GROUP0_SIZE	BIT(3)
1540 #define MLX5E_MAIN_VXLAN_GROUP1_SIZE	BIT(3)
1541 #define MLX5E_MAIN_VXLAN_GROUP2_SIZE	BIT(0)
1542 static int
1543 mlx5e_create_main_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1544     int inlen)
1545 {
1546 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1547 	int err;
1548 	int ix = 0;
1549 
1550 	memset(in, 0, inlen);
1551 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1552 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1553 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1554 	MLX5_SET_CFG(in, start_flow_index, ix);
1555 	ix += MLX5E_MAIN_VXLAN_GROUP0_SIZE;
1556 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1557 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1558 	if (IS_ERR(ft->g[ft->num_groups]))
1559 		goto err_destory_groups;
1560 	ft->num_groups++;
1561 
1562 	memset(in, 0, inlen);
1563 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1564 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1565 	MLX5_SET_CFG(in, start_flow_index, ix);
1566 	ix += MLX5E_MAIN_VXLAN_GROUP1_SIZE;
1567 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1568 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1569 	if (IS_ERR(ft->g[ft->num_groups]))
1570 		goto err_destory_groups;
1571 	ft->num_groups++;
1572 
1573 	memset(in, 0, inlen);
1574 	MLX5_SET_CFG(in, start_flow_index, ix);
1575 	ix += MLX5E_MAIN_VXLAN_GROUP2_SIZE;
1576 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1577 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1578 	if (IS_ERR(ft->g[ft->num_groups]))
1579 		goto err_destory_groups;
1580 	ft->num_groups++;
1581 
1582 	return (0);
1583 
1584 err_destory_groups:
1585 	err = PTR_ERR(ft->g[ft->num_groups]);
1586 	ft->g[ft->num_groups] = NULL;
1587 	mlx5e_destroy_groups(ft);
1588 
1589 	return (err);
1590 }
1591 
1592 static int
1593 mlx5e_create_main_vxlan_groups(struct mlx5e_flow_table *ft)
1594 {
1595 	u32 *in;
1596 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1597 	int err;
1598 
1599 	in = mlx5_vzalloc(inlen);
1600 	if (!in)
1601 		return (-ENOMEM);
1602 
1603 	err = mlx5e_create_main_vxlan_groups_sub(ft, in, inlen);
1604 
1605 	kvfree(in);
1606 	return (err);
1607 }
1608 
1609 
1610 static int
1611 mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
1612 {
1613 	struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
1614 	    &priv->fts.main;
1615 	int err;
1616 
1617 	ft->num_groups = 0;
1618 	ft->t = mlx5_create_flow_table(priv->fts.ns, 0,
1619 	    inner_vxlan ? "vxlan_main" : "main", MLX5E_MAIN_TABLE_SIZE);
1620 
1621 	if (IS_ERR(ft->t)) {
1622 		err = PTR_ERR(ft->t);
1623 		ft->t = NULL;
1624 		return (err);
1625 	}
1626 	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1627 	if (!ft->g) {
1628 		err = -ENOMEM;
1629 		goto err_destroy_main_flow_table;
1630 	}
1631 
1632 	err = inner_vxlan ? mlx5e_create_main_vxlan_groups(ft) :
1633 	    mlx5e_create_main_groups(ft);
1634 	if (err)
1635 		goto err_free_g;
1636 	return (0);
1637 
1638 err_free_g:
1639 	kfree(ft->g);
1640 
1641 err_destroy_main_flow_table:
1642 	mlx5_destroy_flow_table(ft->t);
1643 	ft->t = NULL;
1644 
1645 	return (err);
1646 }
1647 
1648 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
1649 {
1650 	mlx5e_destroy_flow_table(&priv->fts.main);
1651 }
1652 
1653 static void mlx5e_destroy_main_vxlan_flow_table(struct mlx5e_priv *priv)
1654 {
1655 	mlx5e_destroy_flow_table(&priv->fts.main_vxlan);
1656 }
1657 
1658 #define MLX5E_NUM_VLAN_GROUPS	3
1659 #define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
1660 #define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
1661 #define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
1662 #define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
1663 				 MLX5E_VLAN_GROUP1_SIZE +\
1664 				 MLX5E_VLAN_GROUP2_SIZE +\
1665 				 0)
1666 
1667 static int
1668 mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1669 				      int inlen)
1670 {
1671 	int err;
1672 	int ix = 0;
1673 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1674 
1675 	memset(in, 0, inlen);
1676 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1677 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1678 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1679 	MLX5_SET_CFG(in, start_flow_index, ix);
1680 	ix += MLX5E_VLAN_GROUP0_SIZE;
1681 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1682 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1683 	if (IS_ERR(ft->g[ft->num_groups]))
1684 		goto err_destory_groups;
1685 	ft->num_groups++;
1686 
1687 	memset(in, 0, inlen);
1688 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1689 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1690 	MLX5_SET_CFG(in, start_flow_index, ix);
1691 	ix += MLX5E_VLAN_GROUP1_SIZE;
1692 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1693 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1694 	if (IS_ERR(ft->g[ft->num_groups]))
1695 		goto err_destory_groups;
1696 	ft->num_groups++;
1697 
1698 	memset(in, 0, inlen);
1699 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1700 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1701 	MLX5_SET_CFG(in, start_flow_index, ix);
1702 	ix += MLX5E_VLAN_GROUP2_SIZE;
1703 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1704 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1705 	if (IS_ERR(ft->g[ft->num_groups]))
1706 		goto err_destory_groups;
1707 	ft->num_groups++;
1708 
1709 	return (0);
1710 
1711 err_destory_groups:
1712 	err = PTR_ERR(ft->g[ft->num_groups]);
1713 	ft->g[ft->num_groups] = NULL;
1714 	mlx5e_destroy_groups(ft);
1715 
1716 	return (err);
1717 }
1718 
1719 static int
1720 mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
1721 {
1722 	u32 *in;
1723 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1724 	int err;
1725 
1726 	in = mlx5_vzalloc(inlen);
1727 	if (!in)
1728 		return (-ENOMEM);
1729 
1730 	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);
1731 
1732 	kvfree(in);
1733 	return (err);
1734 }
1735 
static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
				       MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

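/*
 * Install a VXLAN steering rule matching the outer
 * {ethertype, IPPROTO_UDP, UDP destination port} tuple and forwarding
 * hits to the main_vxlan flow table for inner header classification.
 */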
static int
mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
    struct mlx5e_vxlan_db_el *el)
{
	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main_vxlan.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	rule_p = &el->vxlan_ft_rule;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

static struct mlx5e_vxlan_db_el *
mlx5e_vxlan_find_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
{
	struct mlx5e_vxlan_db_el *el;

	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
		if (el->proto == proto && el->port == port)
			return (el);
	}
	return (NULL);
}

static struct mlx5e_vxlan_db_el *
mlx5e_vxlan_alloc_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
{
	struct mlx5e_vxlan_db_el *el;

	el = mlx5_vzalloc(sizeof(*el));
	el->refcount = 1;
	el->proto = proto;
	el->port = port;
	el->vxlan_ft_rule = NULL;
	return (el);
}

static int
mlx5e_vxlan_family_to_proto(sa_family_t family, u_int *proto)
{
	switch (family) {
	case AF_INET:
		*proto = ETHERTYPE_IP;
		return (0);
	case AF_INET6:
		*proto = ETHERTYPE_IPV6;
		return (0);
	default:
		return (-EINVAL);
	}
}

static int
mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
    struct mlx5e_vxlan_db_el *el)
{
	u32 *match_criteria;
	u32 *match_value;
	int err;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (match_value == NULL || match_criteria == NULL) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vxlan_rule_out;
	}

	err = mlx5e_add_vxlan_rule_sub(priv, match_criteria, match_value, el);

add_vxlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

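/*
 * Add a reference-counted entry for the given address family and UDP
 * port to the VXLAN database. The hardware rule itself is only
 * installed while IFCAP_VXLAN_HWCSUM is enabled; otherwise the entry
 * is kept so the rule can be (re-)installed later by
 * mlx5e_add_all_vxlan_rules().
 */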
static int
mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
{
	struct mlx5e_vxlan_db_el *el;
	u_int proto;
	int err;

	err = mlx5e_vxlan_family_to_proto(family, &proto);
	if (err != 0)
		return (err);

	el = mlx5e_vxlan_find_db_el(priv, proto, port);
	if (el != NULL) {
		el->refcount++;
		if (el->installed)
			return (0);
	} else
		el = mlx5e_vxlan_alloc_db_el(priv, proto, port);

	if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
		err = mlx5e_add_vxlan_rule_from_db(priv, el);
		if (err == 0)
			el->installed = true;
	}
	if (err == 0)
		TAILQ_INSERT_TAIL(&priv->vxlan.head, el, link);
	else
		kvfree(el);

	return (err);
}

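/*
 * The catch-all rule uses empty match criteria (mc_enable == 0), so it
 * matches every packet that missed the port-specific VXLAN group and
 * forwards it to the regular main flow table.
 */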
static int
mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	rule_p = &priv->fts.vxlan_catchall_ft_rule;
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

static int
mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
{
	u32 *match_criteria;
	u32 *match_value;
	int err;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (match_value == NULL || match_criteria == NULL) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vxlan_rule_out;
	}

	err = mlx5e_add_vxlan_catchall_rule_sub(priv, match_criteria,
	    match_value);

add_vxlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

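/*
 * (Re-)install the hardware rules for all database entries that are
 * not currently installed, e.g. when the flow rules are opened again.
 */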
int
mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv)
{
	struct mlx5e_vxlan_db_el *el;
	int err;

	err = 0;
	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
		if (el->installed)
			continue;
		err = mlx5e_add_vxlan_rule_from_db(priv, el);
		if (err != 0)
			break;
		el->installed = true;
	}

	return (err);
}

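/*
 * Drop one reference on the database entry for the given family and
 * port; the hardware rule and the entry itself are only destroyed
 * when the last reference goes away.
 */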
static int
mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
{
	struct mlx5e_vxlan_db_el *el;
	u_int proto;
	int err;

	err = mlx5e_vxlan_family_to_proto(family, &proto);
	if (err != 0)
		return (err);

	el = mlx5e_vxlan_find_db_el(priv, proto, port);
	if (el == NULL)
		return (0);
	if (el->refcount > 1) {
		el->refcount--;
		return (0);
	}

	if (el->installed)
		mlx5_del_flow_rule(el->vxlan_ft_rule);
	TAILQ_REMOVE(&priv->vxlan.head, el, link);
	kvfree(el);
	return (0);
}

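/*
 * Uninstall all hardware VXLAN rules while keeping the database
 * entries, so that the rules can be re-created later.
 */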
void
mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
{
	struct mlx5e_vxlan_db_el *el;

	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
		if (!el->installed)
			continue;
		mlx5_del_flow_rule(el->vxlan_ft_rule);
		el->installed = false;
	}
}

static void
mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
{
	mlx5_del_flow_rule(priv->fts.vxlan_catchall_ft_rule);
}

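/*
 * Start/stop callbacks invoked when a VXLAN interface begins or stops
 * listening on a UDP port (presumably hooked up through the kernel's
 * VXLAN event handlers). They keep the firmware UDP port list and the
 * steering rules in sync with the network stack.
 */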
void
mlx5e_vxlan_start(void *arg, struct ifnet *ifp __unused, sa_family_t family,
    u_int port)
{
	struct mlx5e_priv *priv = arg;
	int err;

	PRIV_LOCK(priv);
	err = mlx5_vxlan_udp_port_add(priv->mdev, port);
	if (err == 0 && test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_add_vxlan_rule(priv, family, port);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vxlan_stop(void *arg, struct ifnet *ifp __unused, sa_family_t family,
    u_int port)
{
	struct mlx5e_priv *priv = arg;

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_del_vxlan_rule(priv, family, port);
	(void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
	PRIV_UNLOCK(priv);
}

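/*
 * Sizing of the VXLAN flow table: group 0 holds the port-specific
 * match rules (the size looks arbitrary, see the XXXKIB marker) and
 * group 1 holds the single catch-all rule.
 */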
#define	MLX5E_VXLAN_GROUP0_SIZE	BIT(3)	/* XXXKIB */
#define	MLX5E_VXLAN_GROUP1_SIZE	BIT(0)
#define	MLX5E_NUM_VXLAN_GROUPS	BIT(1)
#define	MLX5E_VXLAN_TABLE_SIZE	\
    (MLX5E_VXLAN_GROUP0_SIZE + MLX5E_VXLAN_GROUP1_SIZE)

static int
mlx5e_create_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VXLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VXLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vxlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vxlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vxlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vxlan",
				       MLX5E_VXLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VXLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vxlan_flow_table;
	}

	err = mlx5e_create_vxlan_groups(ft);
	if (err)
		goto err_free_g;

	TAILQ_INIT(&priv->vxlan.head);
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vxlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

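/*
 * Sizing of the inner RSS flow table groups: group 0 matches on the
 * inner {ethertype, ip_protocol} pair, group 1 on the inner ethertype
 * only, and group 2 is the catch-all.
 */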
#define MLX5E_NUM_INNER_RSS_GROUPS	3
#define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
#define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
#define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
#define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
					 MLX5E_INNER_RSS_GROUP1_SIZE +\
					 MLX5E_INNER_RSS_GROUP2_SIZE +\
					 0)

static int
mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
					   int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
				       MLX5E_INNER_RSS_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
			GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_inner_rss_flow_table;
	}

	err = mlx5e_create_inner_rss_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_inner_rss_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
}

static void
mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vxlan);
}

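/*
 * Create all flow tables in dependency order: every table is created
 * before any rule that forwards into it, and the error path unwinds
 * the creations in reverse.
 */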
int
mlx5e_open_flow_tables(struct mlx5e_priv *priv)
{
	int err;

	/* setup namespace pointer */
	priv->fts.ns = mlx5_get_flow_namespace(
	    priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		return (err);

	err = mlx5e_create_vxlan_flow_table(priv);
	if (err)
		goto err_destroy_vlan_flow_table;

	err = mlx5e_create_main_flow_table(priv, true);
	if (err)
		goto err_destroy_vxlan_flow_table;

	err = mlx5e_create_inner_rss_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table_true;

	err = mlx5e_create_main_flow_table(priv, false);
	if (err)
		goto err_destroy_inner_rss_flow_table;

	err = mlx5e_add_vxlan_catchall_rule(priv);
	if (err)
		goto err_destroy_main_flow_table_false;

	err = mlx5e_accel_fs_tcp_create(priv);
	if (err)
		goto err_del_vxlan_catchall_rule;

	return (0);

err_del_vxlan_catchall_rule:
	mlx5e_del_vxlan_catchall_rule(priv);
err_destroy_main_flow_table_false:
	mlx5e_destroy_main_flow_table(priv);
err_destroy_inner_rss_flow_table:
	mlx5e_destroy_inner_rss_flow_table(priv);
err_destroy_main_flow_table_true:
	mlx5e_destroy_main_vxlan_flow_table(priv);
err_destroy_vxlan_flow_table:
	mlx5e_destroy_vxlan_flow_table(priv);
err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

	return (err);
}

void
mlx5e_close_flow_tables(struct mlx5e_priv *priv)
{
	mlx5e_accel_fs_tcp_destroy(priv);
	mlx5e_del_vxlan_catchall_rule(priv);
	mlx5e_destroy_main_flow_table(priv);
	mlx5e_destroy_inner_rss_flow_table(priv);
	mlx5e_destroy_main_vxlan_flow_table(priv);
	mlx5e_destroy_vxlan_flow_table(priv);
	mlx5e_destroy_vlan_flow_table(priv);
}

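/*
 * Populate the flow tables with rules and mark the rules as ready;
 * the MLX5E_STATE_FLOW_RULES_READY bit tells the VXLAN start/stop
 * callbacks that rules may be installed or removed directly.
 */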
int
mlx5e_open_flow_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_all_vlan_rules(priv);
	if (err)
		return (err);

	err = mlx5e_add_main_vxlan_rules(priv);
	if (err)
		goto err_del_all_vlan_rules;

	err = mlx5e_add_all_vxlan_rules(priv);
	if (err)
		goto err_del_main_vxlan_rules;

	mlx5e_set_rx_mode_core(priv, true);

	set_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);

	return (0);

err_del_main_vxlan_rules:
	mlx5e_del_main_vxlan_rules(priv);

err_del_all_vlan_rules:
	mlx5e_del_all_vlan_rules(priv);

	return (err);
}

void
mlx5e_close_flow_rules(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);

	mlx5e_set_rx_mode_core(priv, false);
	mlx5e_del_all_vxlan_rules(priv);
	mlx5e_del_main_vxlan_rules(priv);
	mlx5e_del_all_vlan_rules(priv);
}