1 /*-
2 * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26 #include "opt_rss.h"
27 #include "opt_ratelimit.h"
28
29 #include <dev/mlx5/mlx5_en/en.h>
30
31 #include <linux/list.h>
32 #include <dev/mlx5/fs.h>
33 #include <dev/mlx5/mpfs.h>
34 #include <dev/mlx5/mlx5_core/fs_tcp.h>
35
36 /*
37  * The flow tables and their rules define the receive packet processing path.
38  * Currently the following structure is set up to handle different
39  * offloads, such as TLS RX offload, VLAN decapsulation, packet
40  * classification, RSS hashing and VxLAN checksum offloading:
41 *
42 * +=========+ +=========+ +=================+
43 * |TCP/IPv4 | |TCP/IPv4 | |TCP/IPv4 Match |
44 * |Flowtable|------>| |----->|Outer Proto Match|=====> TLS TIR n
45 * | | |Catch-all|\ | |
46 * +=========+ +=========+| +=================+
47 * |
48 * +------------------------+
49 * V
50 * +=========+ +=========+ +=================+
51 * |TCP/IPv6 | |TCP/IPv6 | |TCP/IPv6 Match |
52 * |Flowtable|------>| |----->|Outer Proto Match|=====> TLS TIR n
53 * | | |Catch-all|\ | |
54 * +=========+ +=========+| +=================+
55 * |
56 * +------------------------+
57 * V
58 * +=========+ +=========+ +=================+
59 * |VLAN ft: | |VxLAN | |VxLAN Main |
60 * |CTAG/STAG|------>| VNI|----->|Inner Proto Match|=====> Inner TIR n
61 * |VID/noVID|/ |Catch-all|\ | |
62 * +=========+ +=========+| +=================+
63 * |
64 * |
65 * |
66 * v
67 * +=================+
68 * |Main |
69 * |Outer Proto Match|=====> TIR n
70 * | |
71 * +=================+
72 *
73 * The path through flow rules directs each packet into an appropriate TIR,
74  * according to:
75 * - VLAN encapsulation
76 * - Outer protocol
77 * - Presence of inner protocol
78 */
79
80 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
81
82 enum {
83 MLX5E_FULLMATCH = 0,
84 MLX5E_ALLMULTI = 1,
85 MLX5E_PROMISC = 2,
86 };
87
88 enum {
89 MLX5E_UC = 0,
90 MLX5E_MC_IPV4 = 1,
91 MLX5E_MC_IPV6 = 2,
92 MLX5E_MC_OTHER = 3,
93 };
94
95 enum {
96 MLX5E_ACTION_NONE = 0,
97 MLX5E_ACTION_ADD = 1,
98 MLX5E_ACTION_DEL = 2,
99 };
100
101 struct mlx5e_eth_addr_hash_node {
102 LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
103 u8 action;
104 u32 mpfs_index;
105 struct mlx5e_eth_addr_info ai;
106 };
107
108 static void mlx5e_del_all_vlan_rules(struct mlx5e_priv *);
109
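/*
 * Hash an ethernet address into one of the driver's address hash
 * buckets; the last byte of the MAC address is used as the key.
 */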
110 static inline int
111 mlx5e_hash_eth_addr(const u8 * addr)
112 {
113 return (addr[5]);
114 }
115
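/*
 * Insert "hn_new" into the address hash unless an entry with the same
 * MAC address already exists.  For an existing entry any pending
 * delete is cancelled and "hn_new" is freed.  Returns true when the
 * new node was inserted.
 */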
116 static bool
117 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
118 struct mlx5e_eth_addr_hash_node *hn_new)
119 {
120 struct mlx5e_eth_addr_hash_node *hn;
121 u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);
122
123 LIST_FOREACH(hn, &hash[ix], hlist) {
124 if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
125 if (hn->action == MLX5E_ACTION_DEL)
126 hn->action = MLX5E_ACTION_NONE;
127 free(hn_new, M_MLX5EN);
128 return (false);
129 }
130 }
131 LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
132 return (true);
133 }
134
135 static void
136 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
137 {
138 LIST_REMOVE(hn, hlist);
139 free(hn, M_MLX5EN);
140 }
141
142 static void
143 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
144 struct mlx5e_eth_addr_info *ai)
145 {
146 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
147 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
148 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
149 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
150 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
151 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
152 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
153 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
154 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6]);
155 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4]);
156 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_ANY]);
157 }
158
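/*
 * Classify a MAC address as unicast, IPv4 multicast, IPv6 multicast or
 * other multicast.
 */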
159 static int
160 mlx5e_get_eth_addr_type(const u8 * addr)
161 {
162 if (ETHER_IS_MULTICAST(addr) == 0)
163 return (MLX5E_UC);
164
165 if ((addr[0] == 0x01) &&
166 (addr[1] == 0x00) &&
167 (addr[2] == 0x5e) &&
168 !(addr[3] & 0x80))
169 return (MLX5E_MC_IPV4);
170
171 if ((addr[0] == 0x33) &&
172 (addr[1] == 0x33))
173 return (MLX5E_MC_IPV6);
174
175 return (MLX5E_MC_OTHER);
176 }
177
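/*
 * Return a bitmask of the traffic types (MLX5E_TT_*) for which flow
 * rules must be installed, based on the rule type and, for full
 * matches, on the class of the MAC address.
 */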
178 static u32
179 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
180 {
181 int eth_addr_type;
182 u32 ret;
183
184 switch (type) {
185 case MLX5E_FULLMATCH:
186 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
187 switch (eth_addr_type) {
188 case MLX5E_UC:
189 ret =
190 (1 << MLX5E_TT_IPV4_TCP) |
191 (1 << MLX5E_TT_IPV6_TCP) |
192 (1 << MLX5E_TT_IPV4_UDP) |
193 (1 << MLX5E_TT_IPV6_UDP) |
194 (1 << MLX5E_TT_IPV4) |
195 (1 << MLX5E_TT_IPV6) |
196 (1 << MLX5E_TT_ANY) |
197 0;
198 break;
199
200 case MLX5E_MC_IPV4:
201 ret =
202 (1 << MLX5E_TT_IPV4_UDP) |
203 (1 << MLX5E_TT_IPV4) |
204 0;
205 break;
206
207 case MLX5E_MC_IPV6:
208 ret =
209 (1 << MLX5E_TT_IPV6_UDP) |
210 (1 << MLX5E_TT_IPV6) |
211 0;
212 break;
213
214 default:
215 ret =
216 (1 << MLX5E_TT_ANY) |
217 0;
218 break;
219 }
220 break;
221
222 case MLX5E_ALLMULTI:
223 ret =
224 (1 << MLX5E_TT_IPV4_UDP) |
225 (1 << MLX5E_TT_IPV6_UDP) |
226 (1 << MLX5E_TT_IPV4) |
227 (1 << MLX5E_TT_IPV6) |
228 (1 << MLX5E_TT_ANY) |
229 0;
230 break;
231
232 default: /* MLX5E_PROMISC */
233 ret =
234 (1 << MLX5E_TT_IPV4_TCP) |
235 (1 << MLX5E_TT_IPV6_TCP) |
236 (1 << MLX5E_TT_IPV4_UDP) |
237 (1 << MLX5E_TT_IPV6_UDP) |
238 (1 << MLX5E_TT_IPV4) |
239 (1 << MLX5E_TT_IPV6) |
240 (1 << MLX5E_TT_ANY) |
241 0;
242 break;
243 }
244
245 return (ret);
246 }
247
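/*
 * Install one rule in the main flow table per traffic type selected by
 * mlx5e_get_tt_vec(), matching the destination MAC plus ethertype and
 * IP protocol where applicable, and forwarding to the matching TIR.
 * On failure all rules added so far for this address are removed.
 */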
248 static int
249 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
250 struct mlx5e_eth_addr_info *ai, int type,
251 u32 *mc, u32 *mv)
252 {
253 struct mlx5_flow_destination dest = {};
254 u8 mc_enable = 0;
255 struct mlx5_flow_rule **rule_p;
256 struct mlx5_flow_table *ft = priv->fts.main.t;
257 u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
258 outer_headers.dmac_47_16);
259 u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
260 outer_headers.dmac_47_16);
261 u32 *tirn = priv->tirn;
262 u32 tt_vec;
263 int err = 0;
264 struct mlx5_flow_act flow_act = {
265 .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
266 .flow_tag = MLX5_FS_ETH_FLOW_TAG,
267 };
268
269 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
270
271 switch (type) {
272 case MLX5E_FULLMATCH:
273 mc_enable = MLX5_MATCH_OUTER_HEADERS;
274 memset(mc_dmac, 0xff, ETH_ALEN);
275 ether_addr_copy(mv_dmac, ai->addr);
276 break;
277
278 case MLX5E_ALLMULTI:
279 mc_enable = MLX5_MATCH_OUTER_HEADERS;
280 mc_dmac[0] = 0x01;
281 mv_dmac[0] = 0x01;
282 break;
283
284 case MLX5E_PROMISC:
285 break;
286 default:
287 break;
288 }
289
290 tt_vec = mlx5e_get_tt_vec(ai, type);
291
292 if (tt_vec & BIT(MLX5E_TT_ANY)) {
293 rule_p = &ai->ft_rule[MLX5E_TT_ANY];
294 dest.tir_num = tirn[MLX5E_TT_ANY];
295 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
296 MLX5_FLOW_RULE_FWD_ACTION_DEST,
297 &flow_act, &dest);
298 if (IS_ERR_OR_NULL(*rule_p))
299 goto err_del_ai;
300 }
301
302 mc_enable = MLX5_MATCH_OUTER_HEADERS;
303 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
304
305 if (tt_vec & BIT(MLX5E_TT_IPV4)) {
306 rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
307 dest.tir_num = tirn[MLX5E_TT_IPV4];
308 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
309 ETHERTYPE_IP);
310 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
311 MLX5_FLOW_RULE_FWD_ACTION_DEST,
312 &flow_act, &dest);
313 if (IS_ERR_OR_NULL(*rule_p))
314 goto err_del_ai;
315 }
316
317 if (tt_vec & BIT(MLX5E_TT_IPV6)) {
318 rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
319 dest.tir_num = tirn[MLX5E_TT_IPV6];
320 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
321 ETHERTYPE_IPV6);
322 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
323 MLX5_FLOW_RULE_FWD_ACTION_DEST,
324 &flow_act, &dest);
325 if (IS_ERR_OR_NULL(*rule_p))
326 goto err_del_ai;
327 }
328
329 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
330 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
331
332 if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
333 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
334 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
335 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
336 ETHERTYPE_IP);
337 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
338 MLX5_FLOW_RULE_FWD_ACTION_DEST,
339 &flow_act, &dest);
340 if (IS_ERR_OR_NULL(*rule_p))
341 goto err_del_ai;
342 }
343
344 if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
345 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
346 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
347 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
348 ETHERTYPE_IPV6);
349 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
350 MLX5_FLOW_RULE_FWD_ACTION_DEST,
351 &flow_act, &dest);
352 if (IS_ERR_OR_NULL(*rule_p))
353 goto err_del_ai;
354 }
355
356 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
357
358 if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
359 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
360 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
361 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
362 ETHERTYPE_IP);
363 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
364 MLX5_FLOW_RULE_FWD_ACTION_DEST,
365 &flow_act, &dest);
366 if (IS_ERR_OR_NULL(*rule_p))
367 goto err_del_ai;
368 }
369
370 if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
371 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
372 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
373 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
374 ETHERTYPE_IPV6);
375 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
376 MLX5_FLOW_RULE_FWD_ACTION_DEST,
377 &flow_act, &dest);
378 if (IS_ERR_OR_NULL(*rule_p))
379 goto err_del_ai;
380 }
381
382 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
383
384 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
385 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
386 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
387 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
388 ETHERTYPE_IP);
389 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
390 MLX5_FLOW_RULE_FWD_ACTION_DEST,
391 &flow_act, &dest);
392 if (IS_ERR_OR_NULL(*rule_p))
393 goto err_del_ai;
394 }
395
396 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
397 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
398 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
399 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
400 ETHERTYPE_IPV6);
401 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
402 MLX5_FLOW_RULE_FWD_ACTION_DEST,
403 &flow_act, &dest);
404 if (IS_ERR_OR_NULL(*rule_p))
405 goto err_del_ai;
406 }
407
408 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
409
410 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
411 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
412 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
413 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
414 ETHERTYPE_IP);
415 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
416 MLX5_FLOW_RULE_FWD_ACTION_DEST,
417 &flow_act, &dest);
418 if (IS_ERR_OR_NULL(*rule_p))
419 goto err_del_ai;
420 }
421
422 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
423 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
424 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
425 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
426 ETHERTYPE_IPV6);
427 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
428 MLX5_FLOW_RULE_FWD_ACTION_DEST,
429 &flow_act, &dest);
430 if (IS_ERR_OR_NULL(*rule_p))
431 goto err_del_ai;
432 }
433
434 return 0;
435
436 err_del_ai:
437 err = PTR_ERR(*rule_p);
438 *rule_p = NULL;
439 mlx5e_del_eth_addr_from_flow_table(priv, ai);
440
441 return err;
442 }
443
444 static int
445 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
446 struct mlx5e_eth_addr_info *ai, int type)
447 {
448 u32 *match_criteria;
449 u32 *match_value;
450 int err = 0;
451
452 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
453 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
454 if (!match_value || !match_criteria) {
455 mlx5_en_err(priv->ifp, "alloc failed\n");
456 err = -ENOMEM;
457 goto add_eth_addr_rule_out;
458 }
459 err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
460 match_value);
461
462 add_eth_addr_rule_out:
463 kvfree(match_criteria);
464 kvfree(match_value);
465
466 return (err);
467 }
468
469 static void
470 mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
471 {
472 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
473 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
474 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
475 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
476 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
477 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
478 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
479 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
480 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
481 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
482 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
483 }
484
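/*
 * Install the inner header classification rules of the main VXLAN
 * table, steering decapsulated traffic to the inner VXLAN TIRs, and a
 * final catch-all rule for everything else.
 */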
485 static int
486 mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
487 {
488 struct mlx5_flow_destination dest = {};
489 u8 mc_enable = 0;
490 struct mlx5_flow_rule **rule_p;
491 struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
492 u32 *tirn = priv->tirn_inner_vxlan;
493 struct mlx5_flow_act flow_act = {
494 .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
495 .flow_tag = MLX5_FS_ETH_FLOW_TAG,
496 };
497 int err = 0;
498
499 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
500
501 mc_enable = MLX5_MATCH_INNER_HEADERS;
502 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
503
504 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
505 dest.tir_num = tirn[MLX5E_TT_IPV4];
506 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
507 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
508 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
509 if (IS_ERR_OR_NULL(*rule_p))
510 goto err_del_ai;
511
512 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
513 dest.tir_num = tirn[MLX5E_TT_IPV6];
514 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
515 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
516 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
517 if (IS_ERR_OR_NULL(*rule_p))
518 goto err_del_ai;
519
520 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
521 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_UDP);
522
523 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
524 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
525 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
526 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
527 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
528 if (IS_ERR_OR_NULL(*rule_p))
529 goto err_del_ai;
530
531 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
532 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
533 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
534 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
535 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
536 if (IS_ERR_OR_NULL(*rule_p))
537 goto err_del_ai;
538
539 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_TCP);
540
541 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
542 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
543 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
544 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
545 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
546 if (IS_ERR_OR_NULL(*rule_p))
547 goto err_del_ai;
548
549 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
550 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
551 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
552 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
553 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
554 if (IS_ERR_OR_NULL(*rule_p))
555 goto err_del_ai;
556
557 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_AH);
558
559 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
560 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
561 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
562 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
563 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
564 if (IS_ERR_OR_NULL(*rule_p))
565 goto err_del_ai;
566
567 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
568 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
569 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
570 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
571 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
572 if (IS_ERR_OR_NULL(*rule_p))
573 goto err_del_ai;
574
575 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_ESP);
576
577 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
578 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
579 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
580 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
581 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
582 if (IS_ERR_OR_NULL(*rule_p))
583 goto err_del_ai;
584
585 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP];
586 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
587 MLX5_SET(fte_match_param, mv, inner_headers.ethertype,
588 ETHERTYPE_IPV6);
589 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
590 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
591 if (IS_ERR_OR_NULL(*rule_p))
592 goto err_del_ai;
593
594 mc_enable = 0;
595 memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
596 memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
597 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
598 dest.tir_num = tirn[MLX5E_TT_ANY];
599 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
600 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
601 if (IS_ERR_OR_NULL(*rule_p))
602 goto err_del_ai;
603
604 return (0);
605
606 err_del_ai:
607 err = PTR_ERR(*rule_p);
608 *rule_p = NULL;
609 mlx5e_del_main_vxlan_rules(priv);
610
611 return (err);
612 }
613
614 static int
615 mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
616 {
617 u32 *match_criteria;
618 u32 *match_value;
619 int err = 0;
620
621 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
622 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
623 if (match_value == NULL || match_criteria == NULL) {
624 mlx5_en_err(priv->ifp, "alloc failed\n");
625 err = -ENOMEM;
626 goto add_main_vxlan_rules_out;
627 }
628 err = mlx5e_add_main_vxlan_rules_sub(priv, match_criteria, match_value);
629
630 add_main_vxlan_rules_out:
631 kvfree(match_criteria);
632 kvfree(match_value);
633
634 return (err);
635 }
636
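/*
 * Push the list of active VLAN IDs to the NIC vport context,
 * truncating the list to the device limit if necessary.
 */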
637 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
638 {
639 if_t ifp = priv->ifp;
640 int max_list_size;
641 int list_size;
642 u16 *vlans;
643 int vlan;
644 int err;
645 int i;
646
647 list_size = 0;
648 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
649 list_size++;
650
651 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
652
653 if (list_size > max_list_size) {
654 mlx5_en_err(ifp,
655 "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
656 list_size, max_list_size);
657 list_size = max_list_size;
658 }
659
660 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
661 if (!vlans)
662 return -ENOMEM;
663
664 i = 0;
665 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
666 if (i >= list_size)
667 break;
668 vlans[i++] = vlan;
669 }
670
671 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
672 if (err)
673 mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
674 err);
675
676 kfree(vlans);
677 return err;
678 }
679
680 enum mlx5e_vlan_rule_type {
681 MLX5E_VLAN_RULE_TYPE_UNTAGGED,
682 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
683 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
684 MLX5E_VLAN_RULE_TYPE_MATCH_VID,
685 };
686
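/*
 * Build the match criteria for the requested VLAN rule type (untagged,
 * any C-TAG, any S-TAG or a specific VID) and forward matching packets
 * to the VXLAN flow table.
 */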
687 static int
688 mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
689 enum mlx5e_vlan_rule_type rule_type, u16 vid,
690 u32 *mc, u32 *mv)
691 {
692 struct mlx5_flow_table *ft = priv->fts.vlan.t;
693 struct mlx5_flow_destination dest = {};
694 u8 mc_enable = 0;
695 struct mlx5_flow_rule **rule_p;
696 int err = 0;
697 struct mlx5_flow_act flow_act = {
698 .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
699 .flow_tag = MLX5_FS_ETH_FLOW_TAG,
700 };
701
702 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
703 dest.ft = priv->fts.vxlan.t;
704
705 mc_enable = MLX5_MATCH_OUTER_HEADERS;
706
707 switch (rule_type) {
708 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
709 rule_p = &priv->vlan.untagged_ft_rule;
710 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
711 break;
712 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
713 rule_p = &priv->vlan.any_cvlan_ft_rule;
714 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
715 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
716 break;
717 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
718 rule_p = &priv->vlan.any_svlan_ft_rule;
719 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
720 MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
721 break;
722 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
723 rule_p = &priv->vlan.active_vlans_ft_rule[vid];
724 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
725 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
726 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
727 MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
728 mlx5e_vport_context_update_vlans(priv);
729 break;
730 }
731
732 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
733 MLX5_FLOW_RULE_FWD_ACTION_DEST,
734 &flow_act,
735 &dest);
736
737 if (IS_ERR(*rule_p)) {
738 err = PTR_ERR(*rule_p);
739 *rule_p = NULL;
740 mlx5_en_err(priv->ifp, "add rule failed\n");
741 }
742
743 return (err);
744 }
745
746 static int
747 mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
748 enum mlx5e_vlan_rule_type rule_type, u16 vid)
749 {
750 u32 *match_criteria;
751 u32 *match_value;
752 int err = 0;
753
754 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
755 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
756 if (!match_value || !match_criteria) {
757 mlx5_en_err(priv->ifp, "alloc failed\n");
758 err = -ENOMEM;
759 goto add_vlan_rule_out;
760 }
761
762 err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
763 match_value);
764
765 add_vlan_rule_out:
766 kvfree(match_criteria);
767 kvfree(match_value);
768
769 return (err);
770 }
771
772 static void
773 mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
774 enum mlx5e_vlan_rule_type rule_type, u16 vid)
775 {
776 switch (rule_type) {
777 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
778 mlx5_del_flow_rule(&priv->vlan.untagged_ft_rule);
779 break;
780 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
781 mlx5_del_flow_rule(&priv->vlan.any_cvlan_ft_rule);
782 break;
783 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
784 mlx5_del_flow_rule(&priv->vlan.any_svlan_ft_rule);
785 break;
786 case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
787 mlx5_del_flow_rule(&priv->vlan.active_vlans_ft_rule[vid]);
788 mlx5e_vport_context_update_vlans(priv);
789 break;
790 default:
791 break;
792 }
793 }
794
795 static void
796 mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
797 {
798 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
799 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
800 }
801
802 static int
803 mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
804 {
805 int err;
806
807 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
808 if (err)
809 return (err);
810
811 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
812 if (err)
813 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
814
815 return (err);
816 }
817
818 void
819 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
820 {
821 if (priv->vlan.filter_disabled) {
822 priv->vlan.filter_disabled = false;
823 if (if_getflags(priv->ifp) & IFF_PROMISC)
824 return;
825 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
826 mlx5e_del_any_vid_rules(priv);
827 }
828 }
829
830 void
831 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
832 {
833 if (!priv->vlan.filter_disabled) {
834 priv->vlan.filter_disabled = true;
835 if (if_getflags(priv->ifp) & IFF_PROMISC)
836 return;
837 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
838 mlx5e_add_any_vid_rules(priv);
839 }
840 }
841
842 void
843 mlx5e_vlan_rx_add_vid(void *arg, if_t ifp, u16 vid)
844 {
845 struct mlx5e_priv *priv = arg;
846
847 if (ifp != priv->ifp)
848 return;
849
850 PRIV_LOCK(priv);
851 if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
852 test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
853 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
854 PRIV_UNLOCK(priv);
855 }
856
857 void
858 mlx5e_vlan_rx_kill_vid(void *arg, if_t ifp, u16 vid)
859 {
860 struct mlx5e_priv *priv = arg;
861
862 if (ifp != priv->ifp)
863 return;
864
865 PRIV_LOCK(priv);
866 clear_bit(vid, priv->vlan.active_vlans);
867 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
868 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
869 PRIV_UNLOCK(priv);
870 }
871
872 static int
873 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
874 {
875 int err;
876 int i;
877
878 set_bit(0, priv->vlan.active_vlans);
879 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
880 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
881 i);
882 if (err)
883 goto error;
884 }
885
886 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
887 if (err)
888 goto error;
889
890 if (priv->vlan.filter_disabled) {
891 err = mlx5e_add_any_vid_rules(priv);
892 if (err)
893 goto error;
894 }
895 return (0);
896 error:
897 mlx5e_del_all_vlan_rules(priv);
898 return (err);
899 }
900
901 static void
902 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
903 {
904 int i;
905
906 if (priv->vlan.filter_disabled)
907 mlx5e_del_any_vid_rules(priv);
908
909 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
910
911 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
912 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
913 clear_bit(0, priv->vlan.active_vlans);
914 }
915
916 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
917 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
918 LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
919
920 static void
921 mlx5e_execute_action(struct mlx5e_priv *priv,
922 struct mlx5e_eth_addr_hash_node *hn)
923 {
924 switch (hn->action) {
925 case MLX5E_ACTION_ADD:
926 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
927 hn->action = MLX5E_ACTION_NONE;
928 break;
929
930 case MLX5E_ACTION_DEL:
931 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
932 if (hn->mpfs_index != -1U)
933 mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
934 mlx5e_del_eth_addr_from_hash(hn);
935 break;
936
937 default:
938 break;
939 }
940 }
941
942 static struct mlx5e_eth_addr_hash_node *
943 mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
944 {
945 struct mlx5e_eth_addr_hash_node *hn;
946
947 hn = LIST_FIRST(fh);
948 if (hn != NULL) {
949 LIST_REMOVE(hn, hlist);
950 LIST_INSERT_HEAD(uh, hn, hlist);
951 }
952 return (hn);
953 }
954
955 static struct mlx5e_eth_addr_hash_node *
956 mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
957 {
958 struct mlx5e_eth_addr_hash_node *hn;
959
960 hn = LIST_FIRST(fh);
961 if (hn != NULL)
962 LIST_REMOVE(hn, hlist);
963 return (hn);
964 }
965
966 struct mlx5e_copy_addr_ctx {
967 struct mlx5e_eth_addr_hash_head *free;
968 struct mlx5e_eth_addr_hash_head *fill;
969 bool success;
970 };
971
972 static u_int
973 mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
974 {
975 struct mlx5e_copy_addr_ctx *ctx = arg;
976 struct mlx5e_eth_addr_hash_node *hn;
977
978 hn = mlx5e_move_hn(ctx->free, ctx->fill);
979 if (hn == NULL) {
980 ctx->success = false;
981 return (0);
982 }
983 ether_addr_copy(hn->ai.addr, LLADDR(sdl));
984
985 return (1);
986 }
987
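/*
 * Snapshot the interface link-level unicast and multicast addresses
 * into the driver address hashes, registering new unicast MACs with
 * MPFS.  If the interface address lists grow while being copied, the
 * placeholders are freed and the whole scan is retried.
 */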
988 static void
989 mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
990 {
991 struct mlx5e_copy_addr_ctx ctx;
992 struct mlx5e_eth_addr_hash_head head_free;
993 struct mlx5e_eth_addr_hash_head head_uc;
994 struct mlx5e_eth_addr_hash_head head_mc;
995 struct mlx5e_eth_addr_hash_node *hn;
996 if_t ifp = priv->ifp;
997 size_t x;
998 size_t num;
999
1000 PRIV_ASSERT_LOCKED(priv);
1001
1002 retry:
1003 LIST_INIT(&head_free);
1004 LIST_INIT(&head_uc);
1005 LIST_INIT(&head_mc);
1006 num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);
1007
1008 /* allocate place holders */
1009 for (x = 0; x != num; x++) {
1010 hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
1011 hn->action = MLX5E_ACTION_ADD;
1012 hn->mpfs_index = -1U;
1013 LIST_INSERT_HEAD(&head_free, hn, hlist);
1014 }
1015
1016 hn = mlx5e_move_hn(&head_free, &head_uc);
1017 MPASS(hn != NULL);
1018
1019 ether_addr_copy(hn->ai.addr, if_getlladdr(ifp));
1020
1021 ctx.free = &head_free;
1022 ctx.fill = &head_uc;
1023 ctx.success = true;
1024 if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
1025 if (ctx.success == false)
1026 goto cleanup;
1027
1028 ctx.fill = &head_mc;
1029 if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
1030 if (ctx.success == false)
1031 goto cleanup;
1032
1033 /* insert L2 unicast addresses into hash list */
1034
1035 while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
1036 if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
1037 continue;
1038 if (hn->mpfs_index == -1U)
1039 mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
1040 hn->ai.addr, 0, 0);
1041 }
1042
1043 /* insert L2 multicast addresses into hash list */
1044
1045 while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
1046 if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
1047 continue;
1048 }
1049
1050 cleanup:
1051 while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
1052 free(hn, M_MLX5EN);
1053 while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
1054 free(hn, M_MLX5EN);
1055 while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
1056 free(hn, M_MLX5EN);
1057
1058 if (ctx.success == false)
1059 goto retry;
1060 }
1061
1062 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
1063 u8 addr_array[][ETH_ALEN], int size)
1064 {
1065 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1066 if_t ifp = priv->ifp;
1067 struct mlx5e_eth_addr_hash_node *hn;
1068 struct mlx5e_eth_addr_hash_head *addr_list;
1069 struct mlx5e_eth_addr_hash_node *tmp;
1070 int i = 0;
1071 int hi;
1072
1073 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1074
1075 if (is_uc) /* Make sure our own address is pushed first */
1076 ether_addr_copy(addr_array[i++], if_getlladdr(ifp));
1077 else if (priv->eth_addr.broadcast_enabled)
1078 ether_addr_copy(addr_array[i++], if_getbroadcastaddr(ifp));
1079
1080 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
1081 if (ether_addr_equal(if_getlladdr(ifp), hn->ai.addr))
1082 continue;
1083 if (i >= size)
1084 break;
1085 ether_addr_copy(addr_array[i++], hn->ai.addr);
1086 }
1087 }
1088
1089 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
1090 int list_type)
1091 {
1092 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1093 struct mlx5e_eth_addr_hash_node *hn;
1094 u8 (*addr_array)[ETH_ALEN] = NULL;
1095 struct mlx5e_eth_addr_hash_head *addr_list;
1096 struct mlx5e_eth_addr_hash_node *tmp;
1097 int max_size;
1098 int size;
1099 int err;
1100 int hi;
1101
1102 size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
1103 max_size = is_uc ?
1104 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
1105 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
1106
1107 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1108 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
1109 size++;
1110
1111 if (size > max_size) {
1112 mlx5_en_err(priv->ifp,
1113 "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
1114 is_uc ? "UC" : "MC", size, max_size);
1115 size = max_size;
1116 }
1117
1118 if (size) {
1119 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
1120 if (!addr_array) {
1121 err = -ENOMEM;
1122 goto out;
1123 }
1124 mlx5e_fill_addr_array(priv, list_type, addr_array, size);
1125 }
1126
1127 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
1128 out:
1129 if (err)
1130 mlx5_en_err(priv->ifp,
1131 "Failed to modify vport %s list err(%d)\n",
1132 is_uc ? "UC" : "MC", err);
1133 kfree(addr_array);
1134 }
1135
1136 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
1137 {
1138 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1139
1140 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
1141 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
1142 mlx5_modify_nic_vport_promisc(priv->mdev, 0,
1143 ea->allmulti_enabled,
1144 ea->promisc_enabled);
1145 }
1146
1147 static void
1148 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
1149 {
1150 struct mlx5e_eth_addr_hash_node *hn;
1151 struct mlx5e_eth_addr_hash_node *tmp;
1152 int i;
1153
1154 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1155 mlx5e_execute_action(priv, hn);
1156
1157 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1158 mlx5e_execute_action(priv, hn);
1159 }
1160
1161 static void
1162 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv, bool rx_mode_enable)
1163 {
1164 struct mlx5e_eth_addr_hash_node *hn;
1165 struct mlx5e_eth_addr_hash_node *tmp;
1166 int i;
1167
1168 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1169 hn->action = MLX5E_ACTION_DEL;
1170 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1171 hn->action = MLX5E_ACTION_DEL;
1172
1173 if (rx_mode_enable)
1174 mlx5e_sync_ifp_addr(priv);
1175
1176 mlx5e_apply_ifp_addr(priv);
1177 }
1178
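/*
 * Compute which of promiscuous, all-multicast and broadcast reception
 * changed state, add or remove the corresponding catch-all rules, then
 * re-sync the per-address rules and the NIC vport context.
 */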
1179 static void
1180 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
1181 {
1182 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1183 if_t ndev = priv->ifp;
1184 int ndev_flags = if_getflags(ndev);
1185
1186 bool promisc_enabled = rx_mode_enable && (ndev_flags & IFF_PROMISC);
1187 bool allmulti_enabled = rx_mode_enable && (ndev_flags & IFF_ALLMULTI);
1188 bool broadcast_enabled = rx_mode_enable;
1189
1190 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
1191 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
1192 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
1193 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
1194 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
1195 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
1196
1197 /* update broadcast address */
1198 ether_addr_copy(priv->eth_addr.broadcast.addr,
1199 if_getbroadcastaddr(priv->ifp));
1200
1201 if (enable_promisc) {
1202 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
1203 if (!priv->vlan.filter_disabled)
1204 mlx5e_add_any_vid_rules(priv);
1205 }
1206 if (enable_allmulti)
1207 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
1208 if (enable_broadcast)
1209 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
1210
1211 mlx5e_handle_ifp_addr(priv, rx_mode_enable);
1212
1213 if (disable_broadcast)
1214 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
1215 if (disable_allmulti)
1216 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
1217 if (disable_promisc) {
1218 if (!priv->vlan.filter_disabled)
1219 mlx5e_del_any_vid_rules(priv);
1220 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
1221 }
1222
1223 ea->promisc_enabled = promisc_enabled;
1224 ea->allmulti_enabled = allmulti_enabled;
1225 ea->broadcast_enabled = broadcast_enabled;
1226
1227 mlx5e_vport_context_update(priv);
1228 }
1229
1230 void
1231 mlx5e_set_rx_mode_work(struct work_struct *work)
1232 {
1233 struct mlx5e_priv *priv =
1234 container_of(work, struct mlx5e_priv, set_rx_mode_work);
1235
1236 PRIV_LOCK(priv);
1237 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1238 mlx5e_set_rx_mode_core(priv, true);
1239 PRIV_UNLOCK(priv);
1240 }
1241
1242 static void
1243 mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
1244 {
1245 int i;
1246
1247 for (i = ft->num_groups - 1; i >= 0; i--) {
1248 if (!IS_ERR_OR_NULL(ft->g[i]))
1249 mlx5_destroy_flow_group(ft->g[i]);
1250 ft->g[i] = NULL;
1251 }
1252 ft->num_groups = 0;
1253 }
1254
1255 static void
1256 mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
1257 {
1258 mlx5e_destroy_groups(ft);
1259 kfree(ft->g);
1260 mlx5_destroy_flow_table(ft->t);
1261 ft->t = NULL;
1262 }
1263
1264 #define MLX5E_NUM_MAIN_GROUPS 10
1265 #define MLX5E_MAIN_GROUP0_SIZE BIT(4)
1266 #define MLX5E_MAIN_GROUP1_SIZE BIT(3)
1267 #define MLX5E_MAIN_GROUP2_SIZE BIT(1)
1268 #define MLX5E_MAIN_GROUP3_SIZE BIT(0)
1269 #define MLX5E_MAIN_GROUP4_SIZE BIT(14)
1270 #define MLX5E_MAIN_GROUP5_SIZE BIT(13)
1271 #define MLX5E_MAIN_GROUP6_SIZE BIT(11)
1272 #define MLX5E_MAIN_GROUP7_SIZE BIT(2)
1273 #define MLX5E_MAIN_GROUP8_SIZE BIT(1)
1274 #define MLX5E_MAIN_GROUP9_SIZE BIT(0)
1275 #define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\
1276 MLX5E_MAIN_GROUP1_SIZE +\
1277 MLX5E_MAIN_GROUP2_SIZE +\
1278 MLX5E_MAIN_GROUP3_SIZE +\
1279 MLX5E_MAIN_GROUP4_SIZE +\
1280 MLX5E_MAIN_GROUP5_SIZE +\
1281 MLX5E_MAIN_GROUP6_SIZE +\
1282 MLX5E_MAIN_GROUP7_SIZE +\
1283 MLX5E_MAIN_GROUP8_SIZE +\
1284 MLX5E_MAIN_GROUP9_SIZE +\
1285 0)
1286
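/*
 * Create the flow groups of the main table, ordered from the most
 * specific match criteria to the least specific; the tunnel (VXLAN)
 * group must be first.
 */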
1287 static int
1288 mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1289 int inlen)
1290 {
1291 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1292 u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
1293 match_criteria.outer_headers.dmac_47_16);
1294 int err;
1295 int ix = 0;
1296
1297 /* Tunnel rules need to be first in this list of groups */
1298
1299 /* Start tunnel rules */
1300 memset(in, 0, inlen);
1301 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1302 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1303 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1304 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1305 MLX5_SET_CFG(in, start_flow_index, ix);
1306 ix += MLX5E_MAIN_GROUP0_SIZE;
1307 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1308 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1309 if (IS_ERR(ft->g[ft->num_groups]))
1310 goto err_destory_groups;
1311 ft->num_groups++;
1312 /* End Tunnel Rules */
1313
1314 memset(in, 0, inlen);
1315 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1316 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1317 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1318 MLX5_SET_CFG(in, start_flow_index, ix);
1319 ix += MLX5E_MAIN_GROUP1_SIZE;
1320 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1321 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1322 if (IS_ERR(ft->g[ft->num_groups]))
1323 goto err_destory_groups;
1324 ft->num_groups++;
1325
1326 memset(in, 0, inlen);
1327 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1328 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1329 MLX5_SET_CFG(in, start_flow_index, ix);
1330 ix += MLX5E_MAIN_GROUP2_SIZE;
1331 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1332 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1333 if (IS_ERR(ft->g[ft->num_groups]))
1334 goto err_destory_groups;
1335 ft->num_groups++;
1336
1337 memset(in, 0, inlen);
1338 MLX5_SET_CFG(in, start_flow_index, ix);
1339 ix += MLX5E_MAIN_GROUP3_SIZE;
1340 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1341 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1342 if (IS_ERR(ft->g[ft->num_groups]))
1343 goto err_destory_groups;
1344 ft->num_groups++;
1345
1346 memset(in, 0, inlen);
1347 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1348 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1349 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1350 memset(dmac, 0xff, ETH_ALEN);
1351 MLX5_SET_CFG(in, start_flow_index, ix);
1352 ix += MLX5E_MAIN_GROUP4_SIZE;
1353 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1354 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1355 if (IS_ERR(ft->g[ft->num_groups]))
1356 goto err_destory_groups;
1357 ft->num_groups++;
1358
1359 memset(in, 0, inlen);
1360 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1361 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1362 memset(dmac, 0xff, ETH_ALEN);
1363 MLX5_SET_CFG(in, start_flow_index, ix);
1364 ix += MLX5E_MAIN_GROUP5_SIZE;
1365 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1366 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1367 if (IS_ERR(ft->g[ft->num_groups]))
1368 goto err_destory_groups;
1369 ft->num_groups++;
1370
1371 memset(in, 0, inlen);
1372 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1373 memset(dmac, 0xff, ETH_ALEN);
1374 MLX5_SET_CFG(in, start_flow_index, ix);
1375 ix += MLX5E_MAIN_GROUP6_SIZE;
1376 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1377 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1378 if (IS_ERR(ft->g[ft->num_groups]))
1379 goto err_destory_groups;
1380 ft->num_groups++;
1381
1382 memset(in, 0, inlen);
1383 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1384 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1385 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1386 dmac[0] = 0x01;
1387 MLX5_SET_CFG(in, start_flow_index, ix);
1388 ix += MLX5E_MAIN_GROUP7_SIZE;
1389 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1390 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1391 if (IS_ERR(ft->g[ft->num_groups]))
1392 goto err_destory_groups;
1393 ft->num_groups++;
1394
1395 memset(in, 0, inlen);
1396 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1397 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1398 dmac[0] = 0x01;
1399 MLX5_SET_CFG(in, start_flow_index, ix);
1400 ix += MLX5E_MAIN_GROUP8_SIZE;
1401 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1402 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1403 if (IS_ERR(ft->g[ft->num_groups]))
1404 goto err_destory_groups;
1405 ft->num_groups++;
1406
1407 memset(in, 0, inlen);
1408 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1409 dmac[0] = 0x01;
1410 MLX5_SET_CFG(in, start_flow_index, ix);
1411 ix += MLX5E_MAIN_GROUP9_SIZE;
1412 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1413 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1414 if (IS_ERR(ft->g[ft->num_groups]))
1415 goto err_destory_groups;
1416 ft->num_groups++;
1417
1418 return (0);
1419
1420 err_destory_groups:
1421 err = PTR_ERR(ft->g[ft->num_groups]);
1422 ft->g[ft->num_groups] = NULL;
1423 mlx5e_destroy_groups(ft);
1424
1425 return (err);
1426 }
1427
1428 static int
1429 mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
1430 {
1431 u32 *in;
1432 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1433 int err;
1434
1435 in = mlx5_vzalloc(inlen);
1436 if (!in)
1437 return (-ENOMEM);
1438
1439 err = mlx5e_create_main_groups_sub(ft, in, inlen);
1440
1441 kvfree(in);
1442 return (err);
1443 }
1444
1445 #define MLX5E_MAIN_VXLAN_GROUP0_SIZE BIT(3)
1446 #define MLX5E_MAIN_VXLAN_GROUP1_SIZE BIT(3)
1447 #define MLX5E_MAIN_VXLAN_GROUP2_SIZE BIT(0)
1448 static int
1449 mlx5e_create_main_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1450 int inlen)
1451 {
1452 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1453 int err;
1454 int ix = 0;
1455
1456 memset(in, 0, inlen);
1457 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1458 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1459 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1460 MLX5_SET_CFG(in, start_flow_index, ix);
1461 ix += MLX5E_MAIN_VXLAN_GROUP0_SIZE;
1462 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1463 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1464 if (IS_ERR(ft->g[ft->num_groups]))
1465 goto err_destory_groups;
1466 ft->num_groups++;
1467
1468 memset(in, 0, inlen);
1469 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1470 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1471 MLX5_SET_CFG(in, start_flow_index, ix);
1472 ix += MLX5E_MAIN_VXLAN_GROUP1_SIZE;
1473 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1474 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1475 if (IS_ERR(ft->g[ft->num_groups]))
1476 goto err_destory_groups;
1477 ft->num_groups++;
1478
1479 memset(in, 0, inlen);
1480 MLX5_SET_CFG(in, start_flow_index, ix);
1481 ix += MLX5E_MAIN_VXLAN_GROUP2_SIZE;
1482 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1483 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1484 if (IS_ERR(ft->g[ft->num_groups]))
1485 goto err_destory_groups;
1486 ft->num_groups++;
1487
1488 return (0);
1489
1490 err_destory_groups:
1491 err = PTR_ERR(ft->g[ft->num_groups]);
1492 ft->g[ft->num_groups] = NULL;
1493 mlx5e_destroy_groups(ft);
1494
1495 return (err);
1496 }
1497
1498 static int
1499 mlx5e_create_main_vxlan_groups(struct mlx5e_flow_table *ft)
1500 {
1501 u32 *in;
1502 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1503 int err;
1504
1505 in = mlx5_vzalloc(inlen);
1506 if (!in)
1507 return (-ENOMEM);
1508
1509 err = mlx5e_create_main_vxlan_groups_sub(ft, in, inlen);
1510
1511 kvfree(in);
1512 return (err);
1513 }
1514
1515
1516 static int
1517 mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
1518 {
1519 struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
1520 &priv->fts.main;
1521 int err;
1522
1523 ft->num_groups = 0;
1524 ft->t = mlx5_create_flow_table(priv->fts.ns, 0,
1525 inner_vxlan ? "vxlan_main" : "main", MLX5E_MAIN_TABLE_SIZE);
1526
1527 if (IS_ERR(ft->t)) {
1528 err = PTR_ERR(ft->t);
1529 ft->t = NULL;
1530 return (err);
1531 }
1532 ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1533 if (!ft->g) {
1534 err = -ENOMEM;
1535 goto err_destroy_main_flow_table;
1536 }
1537
1538 err = inner_vxlan ? mlx5e_create_main_vxlan_groups(ft) :
1539 mlx5e_create_main_groups(ft);
1540 if (err)
1541 goto err_free_g;
1542 return (0);
1543
1544 err_free_g:
1545 kfree(ft->g);
1546
1547 err_destroy_main_flow_table:
1548 mlx5_destroy_flow_table(ft->t);
1549 ft->t = NULL;
1550
1551 return (err);
1552 }
1553
1554 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
1555 {
1556 mlx5e_destroy_flow_table(&priv->fts.main);
1557 }
1558
1559 static void mlx5e_destroy_main_vxlan_flow_table(struct mlx5e_priv *priv)
1560 {
1561 mlx5e_destroy_flow_table(&priv->fts.main_vxlan);
1562 }
1563
1564 #define MLX5E_NUM_VLAN_GROUPS 3
1565 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
1566 #define MLX5E_VLAN_GROUP1_SIZE BIT(1)
1567 #define MLX5E_VLAN_GROUP2_SIZE BIT(0)
1568 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
1569 MLX5E_VLAN_GROUP1_SIZE +\
1570 MLX5E_VLAN_GROUP2_SIZE +\
1571 0)
1572
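/*
 * Create the VLAN table flow groups: C-TAG with VID matches, any
 * C-TAG, and any S-TAG.
 */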
1573 static int
1574 mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1575 int inlen)
1576 {
1577 int err;
1578 int ix = 0;
1579 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1580
1581 memset(in, 0, inlen);
1582 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1583 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1584 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1585 MLX5_SET_CFG(in, start_flow_index, ix);
1586 ix += MLX5E_VLAN_GROUP0_SIZE;
1587 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1588 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1589 if (IS_ERR(ft->g[ft->num_groups]))
1590 goto err_destory_groups;
1591 ft->num_groups++;
1592
1593 memset(in, 0, inlen);
1594 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1595 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1596 MLX5_SET_CFG(in, start_flow_index, ix);
1597 ix += MLX5E_VLAN_GROUP1_SIZE;
1598 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1599 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1600 if (IS_ERR(ft->g[ft->num_groups]))
1601 goto err_destory_groups;
1602 ft->num_groups++;
1603
1604 memset(in, 0, inlen);
1605 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1606 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1607 MLX5_SET_CFG(in, start_flow_index, ix);
1608 ix += MLX5E_VLAN_GROUP2_SIZE;
1609 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1610 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1611 if (IS_ERR(ft->g[ft->num_groups]))
1612 goto err_destory_groups;
1613 ft->num_groups++;
1614
1615 return (0);
1616
1617 err_destory_groups:
1618 err = PTR_ERR(ft->g[ft->num_groups]);
1619 ft->g[ft->num_groups] = NULL;
1620 mlx5e_destroy_groups(ft);
1621
1622 return (err);
1623 }
1624
1625 static int
1626 mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
1627 {
1628 u32 *in;
1629 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1630 int err;
1631
1632 in = mlx5_vzalloc(inlen);
1633 if (!in)
1634 return (-ENOMEM);
1635
1636 err = mlx5e_create_vlan_groups_sub(ft, in, inlen);
1637
1638 kvfree(in);
1639 return (err);
1640 }
1641
1642 static int
1643 mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
1644 {
1645 struct mlx5e_flow_table *ft = &priv->fts.vlan;
1646 int err;
1647
1648 ft->num_groups = 0;
1649 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
1650 MLX5E_VLAN_TABLE_SIZE);
1651
1652 if (IS_ERR(ft->t)) {
1653 err = PTR_ERR(ft->t);
1654 ft->t = NULL;
1655 return (err);
1656 }
1657 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1658 if (!ft->g) {
1659 err = -ENOMEM;
1660 goto err_destroy_vlan_flow_table;
1661 }
1662
1663 err = mlx5e_create_vlan_groups(ft);
1664 if (err)
1665 goto err_free_g;
1666
1667 return (0);
1668
1669 err_free_g:
1670 kfree(ft->g);
1671
1672 err_destroy_vlan_flow_table:
1673 mlx5_destroy_flow_table(ft->t);
1674 ft->t = NULL;
1675
1676 return (err);
1677 }
1678
1679 static void
1680 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
1681 {
1682 mlx5e_destroy_flow_table(&priv->fts.vlan);
1683 }
1684
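/*
 * Match VXLAN traffic by outer ethertype, UDP protocol and destination
 * port and forward it to the main VXLAN (inner header) table.
 */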
1685 static int
1686 mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
1687 struct mlx5e_vxlan_db_el *el)
1688 {
1689 struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1690 struct mlx5_flow_destination dest = {};
1691 u8 mc_enable;
1692 struct mlx5_flow_rule **rule_p;
1693 int err = 0;
1694 struct mlx5_flow_act flow_act = {
1695 .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
1696 .flow_tag = MLX5_FS_ETH_FLOW_TAG,
1697 };
1698
1699 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1700 dest.ft = priv->fts.main_vxlan.t;
1701
1702 mc_enable = MLX5_MATCH_OUTER_HEADERS;
1703 rule_p = &el->vxlan_ft_rule;
1704 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1705 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
1706 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1707 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
1708 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1709 MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
1710
1711 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1712 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
1713
1714 if (IS_ERR(*rule_p)) {
1715 err = PTR_ERR(*rule_p);
1716 *rule_p = NULL;
1717 mlx5_en_err(priv->ifp, "add rule failed\n");
1718 }
1719
1720 return (err);
1721 }
1722
1723 static struct mlx5e_vxlan_db_el *
1724 mlx5e_vxlan_find_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1725 {
1726 struct mlx5e_vxlan_db_el *el;
1727
1728 TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1729 if (el->proto == proto && el->port == port)
1730 return (el);
1731 }
1732 return (NULL);
1733 }
1734
1735 static struct mlx5e_vxlan_db_el *
1736 mlx5e_vxlan_alloc_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1737 {
1738 struct mlx5e_vxlan_db_el *el;
1739
1740 el = mlx5_vzalloc(sizeof(*el));
1741 el->refcount = 1;
1742 el->proto = proto;
1743 el->port = port;
1744 el->vxlan_ft_rule = NULL;
1745 return (el);
1746 }
1747
static int
mlx5e_vxlan_family_to_proto(sa_family_t family, u_int *proto)
{
	switch (family) {
	case AF_INET:
		*proto = ETHERTYPE_IP;
		return (0);
	case AF_INET6:
		*proto = ETHERTYPE_IPV6;
		return (0);
	default:
		return (-EINVAL);
	}
}

static int
mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
    struct mlx5e_vxlan_db_el *el)
{
	u32 *match_criteria;
	u32 *match_value;
	int err;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (match_value == NULL || match_criteria == NULL) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vxlan_rule_out;
	}

	err = mlx5e_add_vxlan_rule_sub(priv, match_criteria, match_value, el);

add_vxlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

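/*
 * Take a reference on the (address family, UDP port) pair and, if
 * VXLAN hardware checksum offload (IFCAP_VXLAN_HWCSUM) is enabled,
 * install the matching steering rule. Entries may exist in the
 * database without an installed rule; those are picked up later by
 * mlx5e_add_all_vxlan_rules().
 */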
static int
mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
{
	struct mlx5e_vxlan_db_el *el;
	u_int proto;
	bool new_el;
	int err;

	err = mlx5e_vxlan_family_to_proto(family, &proto);
	if (err != 0)
		return (err);

	el = mlx5e_vxlan_find_db_el(priv, proto, port);
	new_el = (el == NULL);
	if (!new_el) {
		el->refcount++;
		if (el->installed)
			return (0);
		/* Existing entry whose rule was never installed. */
	} else {
		el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
		if (el == NULL)
			return (-ENOMEM);
	}

	if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
		err = mlx5e_add_vxlan_rule_from_db(priv, el);
		if (err == 0)
			el->installed = true;
	}
	if (new_el) {
		/* Only newly allocated entries are linked or freed here. */
		if (err == 0)
			TAILQ_INSERT_TAIL(&priv->vxlan.head, el, link);
		else
			kvfree(el);
	}

	return (err);
}

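/*
 * The catch-all rule has an empty match (mc_enable == 0) and therefore
 * catches every packet not already matched by the per-port VXLAN rules
 * in the preceding group; such traffic is forwarded to the regular
 * "main" flow table.
 */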
static int
mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;
	struct mlx5_flow_act flow_act = {
		.actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
		.flow_tag = MLX5_FS_ETH_FLOW_TAG,
	};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	rule_p = &priv->fts.vxlan_catchall_ft_rule;
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

static int
mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
{
	u32 *match_criteria;
	u32 *match_value;
	int err;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (match_value == NULL || match_criteria == NULL) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vxlan_rule_out;
	}

	err = mlx5e_add_vxlan_catchall_rule_sub(priv, match_criteria,
	    match_value);

add_vxlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

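/*
 * Install steering rules for every database entry that does not have
 * one yet; used at least from mlx5e_open_flow_rules(). The database
 * entries themselves persist while their rules are torn down.
 */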
int
mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv)
{
	struct mlx5e_vxlan_db_el *el;
	int err;

	err = 0;
	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
		if (el->installed)
			continue;
		err = mlx5e_add_vxlan_rule_from_db(priv, el);
		if (err != 0)
			break;
		el->installed = true;
	}

	return (err);
}

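/*
 * Drop one reference on the given (address family, UDP port) entry.
 * The steering rule and the database entry are removed only when the
 * last reference goes away.
 */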
static int
mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
{
	struct mlx5e_vxlan_db_el *el;
	u_int proto;
	int err;

	err = mlx5e_vxlan_family_to_proto(family, &proto);
	if (err != 0)
		return (err);

	el = mlx5e_vxlan_find_db_el(priv, proto, port);
	if (el == NULL)
		return (0);
	if (el->refcount > 1) {
		el->refcount--;
		return (0);
	}

	if (el->installed)
		mlx5_del_flow_rule(&el->vxlan_ft_rule);
	TAILQ_REMOVE(&priv->vxlan.head, el, link);
	kvfree(el);
	return (0);
}

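/*
 * Uninstall all VXLAN steering rules but keep the database entries so
 * that mlx5e_add_all_vxlan_rules() can re-install them later.
 */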
void
mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
{
	struct mlx5e_vxlan_db_el *el;

	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
		if (!el->installed)
			continue;
		mlx5_del_flow_rule(&el->vxlan_ft_rule);
		el->installed = false;
	}
}

static void
mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
{
	mlx5_del_flow_rule(&priv->fts.vxlan_catchall_ft_rule);
}

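/*
 * VXLAN UDP port registration callbacks, presumably hooked up to the
 * network stack's VXLAN start/stop notifications elsewhere in the
 * driver. They register the port with the device and, once the flow
 * rules are ready, keep the corresponding steering rules in sync.
 */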
void
mlx5e_vxlan_start(void *arg, if_t ifp __unused, sa_family_t family,
    u_int port)
{
	struct mlx5e_priv *priv = arg;
	int err;

	PRIV_LOCK(priv);
	err = mlx5_vxlan_udp_port_add(priv->mdev, port);
	if (err == 0 && test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_add_vxlan_rule(priv, family, port);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vxlan_stop(void *arg, if_t ifp __unused, sa_family_t family,
    u_int port)
{
	struct mlx5e_priv *priv = arg;

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_del_vxlan_rule(priv, family, port);
	(void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
	PRIV_UNLOCK(priv);
}

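/*
 * VXLAN flow table layout: group 0 carries the per-port rules
 * (matching outer ethertype, IP protocol and UDP destination port),
 * group 1 carries the single catch-all rule added above.
 */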
#define MLX5E_VXLAN_GROUP0_SIZE	BIT(3)	/* XXXKIB */
#define MLX5E_VXLAN_GROUP1_SIZE	BIT(0)
#define MLX5E_NUM_VXLAN_GROUPS	BIT(1)
#define MLX5E_VXLAN_TABLE_SIZE \
	(MLX5E_VXLAN_GROUP0_SIZE + MLX5E_VXLAN_GROUP1_SIZE)

static int
mlx5e_create_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VXLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VXLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vxlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vxlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vxlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vxlan",
	    MLX5E_VXLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VXLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vxlan_flow_table;
	}

	err = mlx5e_create_vxlan_groups(ft);
	if (err)
		goto err_free_g;

	TAILQ_INIT(&priv->vxlan.head);
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vxlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

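/*
 * Inner RSS flow table layout: group 0 matches inner ethertype and IP
 * protocol, group 1 matches inner ethertype only, and group 2 is the
 * catch-all. This table classifies the inner headers of encapsulated
 * (VXLAN) traffic, as described in the diagram at the top of this file.
 */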
#define MLX5E_NUM_INNER_RSS_GROUPS	3
#define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
#define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
#define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
#define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
					 MLX5E_INNER_RSS_GROUP1_SIZE +\
					 MLX5E_INNER_RSS_GROUP2_SIZE +\
					 0)

static int
mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
	    MLX5E_INNER_RSS_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
	    GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_inner_rss_flow_table;
	}

	err = mlx5e_create_inner_rss_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_inner_rss_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
}

static void
mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vxlan);
}

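/*
 * Create all receive flow tables in dependency order: VLAN, VXLAN, the
 * main table for VXLAN inner traffic, inner RSS, the regular main
 * table, the VXLAN catch-all rule and the accelerated TCP (TLS RX)
 * tables. The error path unwinds in reverse order. The boolean
 * argument of mlx5e_create_main_flow_table() apparently selects the
 * VXLAN variant of the main table, judging from the paired destroy
 * calls below.
 */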
int
mlx5e_open_flow_tables(struct mlx5e_priv *priv)
{
	int err;

	/* setup namespace pointer */
	priv->fts.ns = mlx5_get_flow_namespace(
	    priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		return (err);

	err = mlx5e_create_vxlan_flow_table(priv);
	if (err)
		goto err_destroy_vlan_flow_table;

	err = mlx5e_create_main_flow_table(priv, true);
	if (err)
		goto err_destroy_vxlan_flow_table;

	err = mlx5e_create_inner_rss_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table_true;

	err = mlx5e_create_main_flow_table(priv, false);
	if (err)
		goto err_destroy_inner_rss_flow_table;

	err = mlx5e_add_vxlan_catchall_rule(priv);
	if (err)
		goto err_destroy_main_flow_table_false;

	err = mlx5e_accel_fs_tcp_create(priv);
	if (err)
		goto err_del_vxlan_catchall_rule;

	return (0);

err_del_vxlan_catchall_rule:
	mlx5e_del_vxlan_catchall_rule(priv);
err_destroy_main_flow_table_false:
	mlx5e_destroy_main_flow_table(priv);
err_destroy_inner_rss_flow_table:
	mlx5e_destroy_inner_rss_flow_table(priv);
err_destroy_main_flow_table_true:
	mlx5e_destroy_main_vxlan_flow_table(priv);
err_destroy_vxlan_flow_table:
	mlx5e_destroy_vxlan_flow_table(priv);
err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

	return (err);
}

void
mlx5e_close_flow_tables(struct mlx5e_priv *priv)
{
	mlx5e_accel_fs_tcp_destroy(priv);
	mlx5e_del_vxlan_catchall_rule(priv);
	mlx5e_destroy_main_flow_table(priv);
	mlx5e_destroy_inner_rss_flow_table(priv);
	mlx5e_destroy_main_vxlan_flow_table(priv);
	mlx5e_destroy_vxlan_flow_table(priv);
	mlx5e_destroy_vlan_flow_table(priv);
}

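/*
 * Install the steering rules into the previously created tables and
 * mark them ready. MLX5E_STATE_FLOW_RULES_READY gates the VXLAN
 * start/stop callbacks above; mlx5e_close_flow_rules() clears the bit
 * and removes the rules again.
 */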
int
mlx5e_open_flow_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_all_vlan_rules(priv);
	if (err)
		return (err);

	err = mlx5e_add_main_vxlan_rules(priv);
	if (err)
		goto err_del_all_vlan_rules;

	err = mlx5e_add_all_vxlan_rules(priv);
	if (err)
		goto err_del_main_vxlan_rules;

	mlx5e_set_rx_mode_core(priv, true);

	set_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);

	return (0);

err_del_main_vxlan_rules:
	mlx5e_del_main_vxlan_rules(priv);

err_del_all_vlan_rules:
	mlx5e_del_all_vlan_rules(priv);

	return (err);
}

void
mlx5e_close_flow_rules(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);

	mlx5e_set_rx_mode_core(priv, false);
	mlx5e_del_all_vxlan_rules(priv);
	mlx5e_del_main_vxlan_rules(priv);
	mlx5e_del_all_vlan_rules(priv);
}
