/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#include <linux/list.h>
#include <dev/mlx5/fs.h>

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

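/*
 * One node in the driver's address hash tables.  Each node caches a
 * single Ethernet address, the pending action (add/delete) for it, and
 * the flow table rules currently installed on its behalf.
 */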
struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	struct mlx5e_eth_addr_info ai;
};

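/*
 * Hash an Ethernet address into the address tables by its last octet,
 * e.g. 00:11:22:33:44:55 selects bucket 0x55.
 */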
static inline int
mlx5e_hash_eth_addr(const u8 *addr)
{
	return (addr[5]);
}

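/*
 * Queue an address for addition to the hardware filters.  If the
 * address is already cached, cancel any pending delete instead.
 * Allocation failure is tolerated; the address is retried on the next
 * re-sync.
 */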
static void
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    const u8 *addr)
{
	struct mlx5e_eth_addr_hash_node *hn;
	int ix = mlx5e_hash_eth_addr(addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			return;
		}
	}

	hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
	if (hn == NULL)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	LIST_INSERT_HEAD(&hash[ix], hn, hlist);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

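/*
 * Tear down every flow rule recorded in the address's traffic type
 * bit vector, from the most specific (IPsec) down to the catch-all.
 */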
static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
}

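/*
 * Classify an Ethernet address: unicast, IPv4 multicast
 * (01:00:5e:00:00:00/25), IPv6 multicast (33:33:xx:xx:xx:xx), or
 * other multicast.
 */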
static int
mlx5e_get_eth_addr_type(const u8 *addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

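/*
 * Return the bit vector of traffic types (see MLX5E_TT_*) that
 * mlx5e_add_eth_addr_rule_sub() must install rules for, derived from
 * the match type and, for full matches, the address class.
 */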
static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

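/*
 * Install one flow rule per traffic type selected by the tt vector,
 * each steering matching packets to its TIR.  The match criteria and
 * match value buffers are refined incrementally between rule
 * insertions, so the order of the blocks below matters.  On failure,
 * every rule added so far is removed again.
 */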
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest;
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return (err);
}

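/*
 * Wrapper around mlx5e_add_eth_addr_rule_sub() that allocates and
 * frees the scratch match parameter buffers.
 */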
static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

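/*
 * Program the list of active VLAN IDs into the NIC vport context,
 * truncating the list to the device limit if necessary.
 */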
static int
mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		if_printf(ifp,
			    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return (-ENOMEM);

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		if_printf(ifp, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return (err);
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

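/*
 * Install a single rule in the VLAN flow table forwarding matching
 * traffic to the main flow table.  For VID match rules the vport
 * context VLAN list is also refreshed.
 */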
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest;
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_ETH_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		if_printf(priv->ifp, "%s: add rule failed\n", __func__);
	}

	return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
			priv->vlan.untagged_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->vlan.any_cvlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
			priv->vlan.any_cvlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->vlan.any_svlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
			priv->vlan.any_svlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->vlan.active_vlans_ft_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
			priv->vlan.active_vlans_ft_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}

static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	return (mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0));
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
					  i);
		if (err)
			return (err);
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		return (err);

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			return (err);
	}
	return (0);
}

void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

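/* Iterate over all nodes of an address hash table; safe for removal. */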
#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

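/*
 * Re-read the interface's unicast and multicast address lists into
 * the driver hash tables, marking previously unknown entries for
 * addition.
 */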
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;

	/* XXX adding this entry might not be needed */
	mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	if_addr_rlock(ifp);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
	}
	if_addr_runlock(ifp);

	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
}

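/*
 * Flatten one address hash table into the linear array consumed by
 * the firmware, pushing our own MAC address (or, for the multicast
 * list, the broadcast address) first.
 */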
static void
mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
    u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct ifnet *ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void
mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
    int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		if_printf(priv->ifp,
			    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		if_printf(priv->ifp,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void
mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}

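/*
 * Mark every cached address for deletion, re-sync with the current
 * interface state, then execute the resulting actions: addresses
 * still present on the interface are reset to no-op, stale ones are
 * removed from the hardware and from the cache.
 */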
static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

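/*
 * Reconcile the promiscuous, allmulti and broadcast filter state with
 * the current interface flags and update the hardware rules and vport
 * context accordingly.  Expected to run with the private lock held.
 */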
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_set_rx_mode_core(priv);
	PRIV_UNLOCK(priv);
}

static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

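/*
 * Match group layout of the main flow table.  The sum of the group
 * sizes defines the total table size; see
 * mlx5e_create_main_groups_sub() for the per-group match criteria.
 */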
#define MLX5E_NUM_MAIN_GROUPS	10
#define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)

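/*
 * Carve the main flow table into its match groups.  Each block below
 * rewrites the same "in" buffer: clear it, set the match criteria,
 * assign the next range of flow indices, then create the group.
 * Tunnel groups must come first.
 */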
static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Tunnel rules need to be first in this list of groups */

	/* Start tunnel rules */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	/* End Tunnel Rules */

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP9_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_main_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main",
				       MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}

#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 0)

static int
mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
				       MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

#define MLX5E_NUM_INNER_RSS_GROUPS	3
#define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
#define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
#define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
#define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
					 MLX5E_INNER_RSS_GROUP1_SIZE +\
					 MLX5E_INNER_RSS_GROUP2_SIZE +\
					 0)

static int
mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
					   int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
				       MLX5E_INNER_RSS_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
			GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_inner_rss_flow_table;
	}

	err = mlx5e_create_inner_rss_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_inner_rss_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
}

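/*
 * Create the VLAN, main and inner RSS flow tables in the kernel flow
 * namespace, tearing down the earlier tables if a later step fails.
 */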
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		return (err);

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		goto err_destroy_vlan_flow_table;

	err = mlx5e_create_inner_rss_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	return (0);

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);
err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

	return (err);
}

void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_inner_rss_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
	mlx5e_destroy_vlan_flow_table(priv);
}