1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/list.h>
34 #include <linux/ip.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/fs.h>
38 #include "en.h"
39 #include "en_rep.h"
40 #include "lib/mpfs.h"
41 #include "en/ptp.h"
42
43 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
44 struct mlx5e_l2_rule *ai, int type);
45 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
46 struct mlx5e_l2_rule *ai);
47
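/* L2 rule types: FULLMATCH matches the complete destination MAC,
 * ALLMULTI matches only the multicast bit of the DMAC.
 */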
48 enum {
49 MLX5E_FULLMATCH = 0,
50 MLX5E_ALLMULTI = 1,
51 };
52
53 enum {
54 MLX5E_UC = 0,
55 MLX5E_MC_IPV4 = 1,
56 MLX5E_MC_IPV6 = 2,
57 MLX5E_MC_OTHER = 3,
58 };
59
60 enum {
61 MLX5E_ACTION_NONE = 0,
62 MLX5E_ACTION_ADD = 1,
63 MLX5E_ACTION_DEL = 2,
64 };
65
66 struct mlx5e_l2_hash_node {
67 struct hlist_node hlist;
68 u8 action;
69 struct mlx5e_l2_rule ai;
70 bool mpfs;
71 };
72
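/* Hash an L2 address into the netdev_uc/netdev_mc hash tables using its last octet */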
73 static inline int mlx5e_hash_l2(u8 *addr)
74 {
75 return addr[5];
76 }
77
78 static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
79 {
80 struct mlx5e_l2_hash_node *hn;
81 int ix = mlx5e_hash_l2(addr);
82 int found = 0;
83
84 hlist_for_each_entry(hn, &hash[ix], hlist)
85 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
86 found = 1;
87 break;
88 }
89
90 if (found) {
91 hn->action = MLX5E_ACTION_NONE;
92 return;
93 }
94
95 hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
96 if (!hn)
97 return;
98
99 ether_addr_copy(hn->ai.addr, addr);
100 hn->action = MLX5E_ACTION_ADD;
101
102 hlist_add_head(&hn->hlist, &hash[ix]);
103 }
104
105 static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
106 {
107 hlist_del(&hn->hlist);
108 kfree(hn);
109 }
110
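/* VLAN steering table state: bitmaps of active C/S-VLANs and the flow rules
 * installed for untagged, any-tag, per-VID and trap traffic.
 */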
111 struct mlx5e_vlan_table {
112 struct mlx5e_flow_table ft;
113 DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
114 DECLARE_BITMAP(active_svlans, VLAN_N_VID);
115 struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
116 struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
117 struct mlx5_flow_handle *untagged_rule;
118 struct mlx5_flow_handle *any_cvlan_rule;
119 struct mlx5_flow_handle *any_svlan_rule;
120 struct mlx5_flow_handle *trap_rule;
121 bool cvlan_filter_disabled;
122 };
123
124 unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan)
125 {
126 return vlan->active_svlans;
127 }
128
129 struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
130 {
131 return vlan->ft.t;
132 }
133
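/* Push the list of active C-VLANs to the NIC vport context, truncating it to
 * the device's maximum VLAN list size.
 */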
134 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
135 {
136 struct net_device *ndev = priv->netdev;
137 int max_list_size;
138 int list_size;
139 u16 *vlans;
140 int vlan;
141 int err;
142 int i;
143
144 list_size = 0;
145 for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
146 list_size++;
147
148 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
149
150 if (list_size > max_list_size) {
151 netdev_warn(ndev,
152 "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
153 list_size, max_list_size);
154 list_size = max_list_size;
155 }
156
157 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
158 if (!vlans)
159 return -ENOMEM;
160
161 i = 0;
162 for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
163 if (i >= list_size)
164 break;
165 vlans[i++] = vlan;
166 }
167
168 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
169 if (err)
170 netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
171 err);
172
173 kfree(vlans);
174 return err;
175 }
176
177 enum mlx5e_vlan_rule_type {
178 MLX5E_VLAN_RULE_TYPE_UNTAGGED,
179 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
180 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
181 MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
182 MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
183 };
184
185 static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
186 enum mlx5e_vlan_rule_type rule_type,
187 u16 vid, struct mlx5_flow_spec *spec)
188 {
189 struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
190 struct mlx5_flow_destination dest = {};
191 struct mlx5_flow_handle **rule_p;
192 MLX5_DECLARE_FLOW_ACT(flow_act);
193 int err = 0;
194
195 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
196 dest.ft = priv->fs.l2.ft.t;
197
198 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
199
200 switch (rule_type) {
201 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
202 /* cvlan_tag enabled in match criteria and
203 * disabled in match value means both S & C tags
204 * don't exist (untagged of both)
205 */
206 rule_p = &priv->fs.vlan->untagged_rule;
207 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
208 outer_headers.cvlan_tag);
209 break;
210 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
211 rule_p = &priv->fs.vlan->any_cvlan_rule;
212 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
213 outer_headers.cvlan_tag);
214 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
215 break;
216 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
217 rule_p = &priv->fs.vlan->any_svlan_rule;
218 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
219 outer_headers.svlan_tag);
220 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
221 break;
222 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
223 rule_p = &priv->fs.vlan->active_svlans_rule[vid];
224 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
225 outer_headers.svlan_tag);
226 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
227 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
228 outer_headers.first_vid);
229 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
230 vid);
231 break;
232 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
233 rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
234 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
235 outer_headers.cvlan_tag);
236 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
237 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
238 outer_headers.first_vid);
239 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
240 vid);
241 break;
242 }
243
244 if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
245 return 0;
246
247 *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
248
249 if (IS_ERR(*rule_p)) {
250 err = PTR_ERR(*rule_p);
251 *rule_p = NULL;
252 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
253 }
254
255 return err;
256 }
257
258 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
259 enum mlx5e_vlan_rule_type rule_type, u16 vid)
260 {
261 struct mlx5_flow_spec *spec;
262 int err = 0;
263
264 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
265 if (!spec)
266 return -ENOMEM;
267
268 if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
269 mlx5e_vport_context_update_vlans(priv);
270
271 err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
272
273 kvfree(spec);
274
275 return err;
276 }
277
278 static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
279 enum mlx5e_vlan_rule_type rule_type, u16 vid)
280 {
281 switch (rule_type) {
282 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
283 if (priv->fs.vlan->untagged_rule) {
284 mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
285 priv->fs.vlan->untagged_rule = NULL;
286 }
287 break;
288 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
289 if (priv->fs.vlan->any_cvlan_rule) {
290 mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
291 priv->fs.vlan->any_cvlan_rule = NULL;
292 }
293 break;
294 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
295 if (priv->fs.vlan->any_svlan_rule) {
296 mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
297 priv->fs.vlan->any_svlan_rule = NULL;
298 }
299 break;
300 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
301 if (priv->fs.vlan->active_svlans_rule[vid]) {
302 mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
303 priv->fs.vlan->active_svlans_rule[vid] = NULL;
304 }
305 break;
306 case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
307 if (priv->fs.vlan->active_cvlans_rule[vid]) {
308 mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
309 priv->fs.vlan->active_cvlans_rule[vid] = NULL;
310 }
311 mlx5e_vport_context_update_vlans(priv);
312 break;
313 }
314 }
315
316 static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
317 {
318 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
319 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
320 }
321
322 static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
323 {
324 int err;
325
326 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
327 if (err)
328 return err;
329
330 return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
331 }
332
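/* Install a rule that tags matching packets with the given trap id (flow tag)
 * and steers them to the trap TIR.
 */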
333 static struct mlx5_flow_handle *
334 mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
335 {
336 struct mlx5_flow_destination dest = {};
337 MLX5_DECLARE_FLOW_ACT(flow_act);
338 struct mlx5_flow_handle *rule;
339 struct mlx5_flow_spec *spec;
340
341 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
342 if (!spec)
343 return ERR_PTR(-ENOMEM);
344 spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
345 spec->flow_context.flow_tag = trap_id;
346 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
347 dest.tir_num = tir_num;
348
349 rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
350 kvfree(spec);
351 return rule;
352 }
353
354 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
355 {
356 struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
357 struct mlx5_flow_handle *rule;
358 int err;
359
360 rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
361 if (IS_ERR(rule)) {
362 err = PTR_ERR(rule);
363 priv->fs.vlan->trap_rule = NULL;
364 netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
365 __func__, err);
366 return err;
367 }
368 priv->fs.vlan->trap_rule = rule;
369 return 0;
370 }
371
372 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
373 {
374 if (priv->fs.vlan->trap_rule) {
375 mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
376 priv->fs.vlan->trap_rule = NULL;
377 }
378 }
379
380 int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
381 {
382 struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
383 struct mlx5_flow_handle *rule;
384 int err;
385
386 rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
387 if (IS_ERR(rule)) {
388 err = PTR_ERR(rule);
389 priv->fs.l2.trap_rule = NULL;
390 netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
391 __func__, err);
392 return err;
393 }
394 priv->fs.l2.trap_rule = rule;
395 return 0;
396 }
397
398 void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
399 {
400 if (priv->fs.l2.trap_rule) {
401 mlx5_del_flow_rules(priv->fs.l2.trap_rule);
402 priv->fs.l2.trap_rule = NULL;
403 }
404 }
405
406 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
407 {
408 if (!priv->fs.vlan->cvlan_filter_disabled)
409 return;
410
411 priv->fs.vlan->cvlan_filter_disabled = false;
412 if (priv->netdev->flags & IFF_PROMISC)
413 return;
414 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
415 }
416
417 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
418 {
419 if (priv->fs.vlan->cvlan_filter_disabled)
420 return;
421
422 priv->fs.vlan->cvlan_filter_disabled = true;
423 if (priv->netdev->flags & IFF_PROMISC)
424 return;
425 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
426 }
427
428 static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
429 {
430 int err;
431
432 set_bit(vid, priv->fs.vlan->active_cvlans);
433
434 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
435 if (err)
436 clear_bit(vid, priv->fs.vlan->active_cvlans);
437
438 return err;
439 }
440
441 static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
442 {
443 struct net_device *netdev = priv->netdev;
444 int err;
445
446 set_bit(vid, priv->fs.vlan->active_svlans);
447
448 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
449 if (err) {
450 clear_bit(vid, priv->fs.vlan->active_svlans);
451 return err;
452 }
453
454 /* Re-evaluate netdev features that depend on S-tag filtering (e.g. C-tag stripping) */
455 netdev_update_features(netdev);
456 return err;
457 }
458
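/* .ndo_vlan_rx_add_vid: track the VID in the active bitmap and install the
 * matching C-tag or S-tag steering rule (no-op on the uplink representor).
 */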
459 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
460 {
461 struct mlx5e_priv *priv = netdev_priv(dev);
462
463 if (mlx5e_is_uplink_rep(priv))
464 return 0; /* no vlan table for uplink rep */
465
466 if (be16_to_cpu(proto) == ETH_P_8021Q)
467 return mlx5e_vlan_rx_add_cvid(priv, vid);
468 else if (be16_to_cpu(proto) == ETH_P_8021AD)
469 return mlx5e_vlan_rx_add_svid(priv, vid);
470
471 return -EOPNOTSUPP;
472 }
473
474 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
475 {
476 struct mlx5e_priv *priv = netdev_priv(dev);
477
478 if (mlx5e_is_uplink_rep(priv))
479 return 0; /* no vlan table for uplink rep */
480
481 if (be16_to_cpu(proto) == ETH_P_8021Q) {
482 clear_bit(vid, priv->fs.vlan->active_cvlans);
483 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
484 } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
485 clear_bit(vid, priv->fs.vlan->active_svlans);
486 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
487 netdev_update_features(dev);
488 }
489
490 return 0;
491 }
492
493 static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
494 {
495 int i;
496
497 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
498
499 for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
500 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
501 }
502
503 for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
504 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
505
506 if (priv->fs.vlan->cvlan_filter_disabled)
507 mlx5e_add_any_vid_rules(priv);
508 }
509
510 static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
511 {
512 int i;
513
514 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
515
516 for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
517 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
518 }
519
520 for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
521 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
522
523 WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
524
525 mlx5e_remove_vlan_trap(priv);
526
527 /* must be called after DESTROY bit is set and
528 * set_rx_mode is called and flushed
529 */
530 if (priv->fs.vlan->cvlan_filter_disabled)
531 mlx5e_del_any_vid_rules(priv);
532 }
533
534 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
535 for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
536 hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
537
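/* Apply the pending action on an L2 hash node: install/remove the L2 flow rule
 * and, for unicast addresses, the corresponding MPFS (multi-PF switch) entry.
 */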
538 static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
539 struct mlx5e_l2_hash_node *hn)
540 {
541 u8 action = hn->action;
542 u8 mac_addr[ETH_ALEN];
543 int l2_err = 0;
544
545 ether_addr_copy(mac_addr, hn->ai.addr);
546
547 switch (action) {
548 case MLX5E_ACTION_ADD:
549 mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
550 if (!is_multicast_ether_addr(mac_addr)) {
551 l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
552 hn->mpfs = !l2_err;
553 }
554 hn->action = MLX5E_ACTION_NONE;
555 break;
556
557 case MLX5E_ACTION_DEL:
558 if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
559 l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
560 mlx5e_del_l2_flow_rule(priv, &hn->ai);
561 mlx5e_del_l2_from_hash(hn);
562 break;
563 }
564
565 if (l2_err)
566 netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
567 action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
568 }
569
570 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
571 {
572 struct net_device *netdev = priv->netdev;
573 struct netdev_hw_addr *ha;
574
575 netif_addr_lock_bh(netdev);
576
577 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
578 priv->netdev->dev_addr);
579
580 netdev_for_each_uc_addr(ha, netdev)
581 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
582
583 netdev_for_each_mc_addr(ha, netdev)
584 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
585
586 netif_addr_unlock_bh(netdev);
587 }
588
589 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
590 u8 addr_array[][ETH_ALEN], int size)
591 {
592 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
593 struct net_device *ndev = priv->netdev;
594 struct mlx5e_l2_hash_node *hn;
595 struct hlist_head *addr_list;
596 struct hlist_node *tmp;
597 int i = 0;
598 int hi;
599
600 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
601
602 if (is_uc) /* Make sure our own address is pushed first */
603 ether_addr_copy(addr_array[i++], ndev->dev_addr);
604 else if (priv->fs.l2.broadcast_enabled)
605 ether_addr_copy(addr_array[i++], ndev->broadcast);
606
607 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
608 if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
609 continue;
610 if (i >= size)
611 break;
612 ether_addr_copy(addr_array[i++], hn->ai.addr);
613 }
614 }
615
616 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
617 int list_type)
618 {
619 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
620 struct mlx5e_l2_hash_node *hn;
621 u8 (*addr_array)[ETH_ALEN] = NULL;
622 struct hlist_head *addr_list;
623 struct hlist_node *tmp;
624 int max_size;
625 int size;
626 int err;
627 int hi;
628
629 size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
630 max_size = is_uc ?
631 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
632 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
633
634 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
635 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
636 size++;
637
638 if (size > max_size) {
639 netdev_warn(priv->netdev,
640 "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
641 is_uc ? "UC" : "MC", size, max_size);
642 size = max_size;
643 }
644
645 if (size) {
646 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
647 if (!addr_array) {
648 err = -ENOMEM;
649 goto out;
650 }
651 mlx5e_fill_addr_array(priv, list_type, addr_array, size);
652 }
653
654 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
655 out:
656 if (err)
657 netdev_err(priv->netdev,
658 "Failed to modify vport %s list err(%d)\n",
659 is_uc ? "UC" : "MC", err);
660 kfree(addr_array);
661 }
662
663 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
664 {
665 struct mlx5e_l2_table *ea = &priv->fs.l2;
666
667 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
668 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
669 mlx5_modify_nic_vport_promisc(priv->mdev, 0,
670 ea->allmulti_enabled,
671 ea->promisc_enabled);
672 }
673
674 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
675 {
676 struct mlx5e_l2_hash_node *hn;
677 struct hlist_node *tmp;
678 int i;
679
680 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
681 mlx5e_execute_l2_action(priv, hn);
682
683 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
684 mlx5e_execute_l2_action(priv, hn);
685 }
686
687 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
688 {
689 struct mlx5e_l2_hash_node *hn;
690 struct hlist_node *tmp;
691 int i;
692
693 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
694 hn->action = MLX5E_ACTION_DEL;
695 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
696 hn->action = MLX5E_ACTION_DEL;
697
698 if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
699 mlx5e_sync_netdev_addr(priv);
700
701 mlx5e_apply_netdev_addr(priv);
702 }
703
704 #define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
705 #define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
706
707 static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
708 {
709 struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
710 struct mlx5_flow_destination dest = {};
711 struct mlx5_flow_handle **rule_p;
712 MLX5_DECLARE_FLOW_ACT(flow_act);
713 struct mlx5_flow_spec *spec;
714 int err = 0;
715
716 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
717 if (!spec)
718 return -ENOMEM;
719 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
720 dest.ft = priv->fs.ttc.ft.t;
721
722 rule_p = &priv->fs.promisc.rule;
723 *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
724 if (IS_ERR(*rule_p)) {
725 err = PTR_ERR(*rule_p);
726 *rule_p = NULL;
727 netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
728 }
729 kvfree(spec);
730 return err;
731 }
732
733 static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
734 {
735 struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
736 struct mlx5_flow_table_attr ft_attr = {};
737 int err;
738
739 ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
740 ft_attr.autogroup.max_num_groups = 1;
741 ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
742 ft_attr.prio = MLX5E_NIC_PRIO;
743
744 ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
745 if (IS_ERR(ft->t)) {
746 err = PTR_ERR(ft->t);
747 netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
748 return err;
749 }
750
751 err = mlx5e_add_promisc_rule(priv);
752 if (err)
753 goto err_destroy_promisc_table;
754
755 return 0;
756
757 err_destroy_promisc_table:
758 mlx5_destroy_flow_table(ft->t);
759 ft->t = NULL;
760
761 return err;
762 }
763
764 static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
765 {
766 if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
767 return;
768 mlx5_del_flow_rules(priv->fs.promisc.rule);
769 priv->fs.promisc.rule = NULL;
770 }
771
772 static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
773 {
774 if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
775 return;
776 mlx5e_del_promisc_rule(priv);
777 mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
778 priv->fs.promisc.ft.t = NULL;
779 }
780
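/* Deferred rx-mode handler: compare the requested promisc/allmulti/broadcast
 * state against the current one, add or remove the corresponding rules, then
 * sync the netdev address lists and the vport context.
 */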
781 void mlx5e_set_rx_mode_work(struct work_struct *work)
782 {
783 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
784 set_rx_mode_work);
785
786 struct mlx5e_l2_table *ea = &priv->fs.l2;
787 struct net_device *ndev = priv->netdev;
788
789 bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
790 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
791 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
792 bool broadcast_enabled = rx_mode_enable;
793
794 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
795 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
796 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
797 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
798 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
799 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
800 int err;
801
802 if (enable_promisc) {
803 err = mlx5e_create_promisc_table(priv);
804 if (err)
805 enable_promisc = false;
806 if (!priv->channels.params.vlan_strip_disable && !err)
807 netdev_warn_once(ndev,
808 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
809 }
810 if (enable_allmulti)
811 mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
812 if (enable_broadcast)
813 mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
814
815 mlx5e_handle_netdev_addr(priv);
816
817 if (disable_broadcast)
818 mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
819 if (disable_allmulti)
820 mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
821 if (disable_promisc)
822 mlx5e_destroy_promisc_table(priv);
823
824 ea->promisc_enabled = promisc_enabled;
825 ea->allmulti_enabled = allmulti_enabled;
826 ea->broadcast_enabled = broadcast_enabled;
827
828 mlx5e_vport_context_update(priv);
829 }
830
831 static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
832 {
833 int i;
834
835 for (i = ft->num_groups - 1; i >= 0; i--) {
836 if (!IS_ERR_OR_NULL(ft->g[i]))
837 mlx5_destroy_flow_group(ft->g[i]);
838 ft->g[i] = NULL;
839 }
840 ft->num_groups = 0;
841 }
842
843 void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
844 {
845 ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
846 }
847
848 void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
849 {
850 mlx5e_destroy_groups(ft);
851 kfree(ft->g);
852 mlx5_destroy_flow_table(ft->t);
853 ft->t = NULL;
854 }
855
856 static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
857 {
858 int i;
859
860 for (i = 0; i < MLX5E_NUM_TT; i++) {
861 if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
862 mlx5_del_flow_rules(ttc->rules[i].rule);
863 ttc->rules[i].rule = NULL;
864 }
865 }
866
867 for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
868 if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
869 mlx5_del_flow_rules(ttc->tunnel_rules[i]);
870 ttc->tunnel_rules[i] = NULL;
871 }
872 }
873 }
874
875 struct mlx5e_etype_proto {
876 u16 etype;
877 u8 proto;
878 };
879
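/* TTC (traffic type classifier) templates: the ethertype/IP-protocol pair that
 * identifies each traffic type.
 */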
880 static struct mlx5e_etype_proto ttc_rules[] = {
881 [MLX5E_TT_IPV4_TCP] = {
882 .etype = ETH_P_IP,
883 .proto = IPPROTO_TCP,
884 },
885 [MLX5E_TT_IPV6_TCP] = {
886 .etype = ETH_P_IPV6,
887 .proto = IPPROTO_TCP,
888 },
889 [MLX5E_TT_IPV4_UDP] = {
890 .etype = ETH_P_IP,
891 .proto = IPPROTO_UDP,
892 },
893 [MLX5E_TT_IPV6_UDP] = {
894 .etype = ETH_P_IPV6,
895 .proto = IPPROTO_UDP,
896 },
897 [MLX5E_TT_IPV4_IPSEC_AH] = {
898 .etype = ETH_P_IP,
899 .proto = IPPROTO_AH,
900 },
901 [MLX5E_TT_IPV6_IPSEC_AH] = {
902 .etype = ETH_P_IPV6,
903 .proto = IPPROTO_AH,
904 },
905 [MLX5E_TT_IPV4_IPSEC_ESP] = {
906 .etype = ETH_P_IP,
907 .proto = IPPROTO_ESP,
908 },
909 [MLX5E_TT_IPV6_IPSEC_ESP] = {
910 .etype = ETH_P_IPV6,
911 .proto = IPPROTO_ESP,
912 },
913 [MLX5E_TT_IPV4] = {
914 .etype = ETH_P_IP,
915 .proto = 0,
916 },
917 [MLX5E_TT_IPV6] = {
918 .etype = ETH_P_IPV6,
919 .proto = 0,
920 },
921 [MLX5E_TT_ANY] = {
922 .etype = 0,
923 .proto = 0,
924 },
925 };
926
927 static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
928 [MLX5E_TT_IPV4_GRE] = {
929 .etype = ETH_P_IP,
930 .proto = IPPROTO_GRE,
931 },
932 [MLX5E_TT_IPV6_GRE] = {
933 .etype = ETH_P_IPV6,
934 .proto = IPPROTO_GRE,
935 },
936 [MLX5E_TT_IPV4_IPIP] = {
937 .etype = ETH_P_IP,
938 .proto = IPPROTO_IPIP,
939 },
940 [MLX5E_TT_IPV6_IPIP] = {
941 .etype = ETH_P_IPV6,
942 .proto = IPPROTO_IPIP,
943 },
944 [MLX5E_TT_IPV4_IPV6] = {
945 .etype = ETH_P_IP,
946 .proto = IPPROTO_IPV6,
947 },
948 [MLX5E_TT_IPV6_IPV6] = {
949 .etype = ETH_P_IPV6,
950 .proto = IPPROTO_IPV6,
951 },
952
953 };
954
955 u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt)
956 {
957 return ttc_tunnel_rules[tt].proto;
958 }
959
960 static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type)
961 {
962 switch (proto_type) {
963 case IPPROTO_GRE:
964 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
965 case IPPROTO_IPIP:
966 case IPPROTO_IPV6:
967 return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
968 MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
969 default:
970 return false;
971 }
972 }
973
974 static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
975 {
976 int tt;
977
978 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
979 if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto))
980 return true;
981 }
982 return false;
983 }
984
985 bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
986 {
987 return (mlx5e_tunnel_any_rx_proto_supported(mdev) &&
988 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
989 }
990
991 static u8 mlx5e_etype_to_ipv(u16 ethertype)
992 {
993 if (ethertype == ETH_P_IP)
994 return 4;
995
996 if (ethertype == ETH_P_IPV6)
997 return 6;
998
999 return 0;
1000 }
1001
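/* Build an outer-header TTC rule: match ip_protocol plus either ip_version
 * (when the device supports it) or ethertype, and steer to the given
 * destination TIR or flow table.
 */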
1002 static struct mlx5_flow_handle *
1003 mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
1004 struct mlx5_flow_table *ft,
1005 struct mlx5_flow_destination *dest,
1006 u16 etype,
1007 u8 proto)
1008 {
1009 int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
1010 MLX5_DECLARE_FLOW_ACT(flow_act);
1011 struct mlx5_flow_handle *rule;
1012 struct mlx5_flow_spec *spec;
1013 int err = 0;
1014 u8 ipv;
1015
1016 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1017 if (!spec)
1018 return ERR_PTR(-ENOMEM);
1019
1020 if (proto) {
1021 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1022 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
1023 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
1024 }
1025
1026 ipv = mlx5e_etype_to_ipv(etype);
1027 if (match_ipv_outer && ipv) {
1028 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1029 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
1030 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
1031 } else if (etype) {
1032 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1033 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
1034 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
1035 }
1036
1037 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
1038 if (IS_ERR(rule)) {
1039 err = PTR_ERR(rule);
1040 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
1041 }
1042
1043 kvfree(spec);
1044 return err ? ERR_PTR(err) : rule;
1045 }
1046
1047 static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
1048 struct ttc_params *params,
1049 struct mlx5e_ttc_table *ttc)
1050 {
1051 struct mlx5_flow_destination dest = {};
1052 struct mlx5_flow_handle **trules;
1053 struct mlx5e_ttc_rule *rules;
1054 struct mlx5_flow_table *ft;
1055 int tt;
1056 int err;
1057
1058 ft = ttc->ft.t;
1059 rules = ttc->rules;
1060
1061 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1062 for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
1063 struct mlx5e_ttc_rule *rule = &rules[tt];
1064
1065 if (tt == MLX5E_TT_ANY)
1066 dest.tir_num = params->any_tt_tirn;
1067 else
1068 dest.tir_num = params->indir_tirn[tt];
1069
1070 rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest,
1071 ttc_rules[tt].etype,
1072 ttc_rules[tt].proto);
1073 if (IS_ERR(rule->rule)) {
1074 err = PTR_ERR(rule->rule);
1075 rule->rule = NULL;
1076 goto del_rules;
1077 }
1078 rule->default_dest = dest;
1079 }
1080
1081 if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
1082 return 0;
1083
1084 trules = ttc->tunnel_rules;
1085 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1086 dest.ft = params->inner_ttc->ft.t;
1087 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
1088 if (!mlx5e_tunnel_proto_supported_rx(priv->mdev,
1089 ttc_tunnel_rules[tt].proto))
1090 continue;
1091 trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
1092 ttc_tunnel_rules[tt].etype,
1093 ttc_tunnel_rules[tt].proto);
1094 if (IS_ERR(trules[tt])) {
1095 err = PTR_ERR(trules[tt]);
1096 trules[tt] = NULL;
1097 goto del_rules;
1098 }
1099 }
1100
1101 return 0;
1102
1103 del_rules:
1104 mlx5e_cleanup_ttc_rules(ttc);
1105 return err;
1106 }
1107
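/* The TTC table uses three flow groups: L4 (IP version/ethertype + IP protocol),
 * L3 (IP version/ethertype only) and a wildcard group for MLX5E_TT_ANY.
 */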
1108 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
1109 bool use_ipv)
1110 {
1111 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1112 struct mlx5e_flow_table *ft = &ttc->ft;
1113 int ix = 0;
1114 u32 *in;
1115 int err;
1116 u8 *mc;
1117
1118 ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
1119 sizeof(*ft->g), GFP_KERNEL);
1120 if (!ft->g)
1121 return -ENOMEM;
1122 in = kvzalloc(inlen, GFP_KERNEL);
1123 if (!in) {
1124 kfree(ft->g);
1125 ft->g = NULL;
1126 return -ENOMEM;
1127 }
1128
1129 /* L4 Group */
1130 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1131 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1132 if (use_ipv)
1133 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
1134 else
1135 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1136 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1137 MLX5_SET_CFG(in, start_flow_index, ix);
1138 ix += MLX5E_TTC_GROUP1_SIZE;
1139 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1140 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1141 if (IS_ERR(ft->g[ft->num_groups]))
1142 goto err;
1143 ft->num_groups++;
1144
1145 /* L3 Group */
1146 MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
1147 MLX5_SET_CFG(in, start_flow_index, ix);
1148 ix += MLX5E_TTC_GROUP2_SIZE;
1149 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1150 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1151 if (IS_ERR(ft->g[ft->num_groups]))
1152 goto err;
1153 ft->num_groups++;
1154
1155 /* Any Group */
1156 memset(in, 0, inlen);
1157 MLX5_SET_CFG(in, start_flow_index, ix);
1158 ix += MLX5E_TTC_GROUP3_SIZE;
1159 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1160 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1161 if (IS_ERR(ft->g[ft->num_groups]))
1162 goto err;
1163 ft->num_groups++;
1164
1165 kvfree(in);
1166 return 0;
1167
1168 err:
1169 err = PTR_ERR(ft->g[ft->num_groups]);
1170 ft->g[ft->num_groups] = NULL;
1171 kvfree(in);
1172
1173 return err;
1174 }
1175
1176 static struct mlx5_flow_handle *
1177 mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
1178 struct mlx5_flow_table *ft,
1179 struct mlx5_flow_destination *dest,
1180 u16 etype, u8 proto)
1181 {
1182 MLX5_DECLARE_FLOW_ACT(flow_act);
1183 struct mlx5_flow_handle *rule;
1184 struct mlx5_flow_spec *spec;
1185 int err = 0;
1186 u8 ipv;
1187
1188 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1189 if (!spec)
1190 return ERR_PTR(-ENOMEM);
1191
1192 ipv = mlx5e_etype_to_ipv(etype);
1193 if (etype && ipv) {
1194 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
1195 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
1196 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
1197 }
1198
1199 if (proto) {
1200 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
1201 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
1202 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
1203 }
1204
1205 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
1206 if (IS_ERR(rule)) {
1207 err = PTR_ERR(rule);
1208 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
1209 }
1210
1211 kvfree(spec);
1212 return err ? ERR_PTR(err) : rule;
1213 }
1214
1215 static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
1216 struct ttc_params *params,
1217 struct mlx5e_ttc_table *ttc)
1218 {
1219 struct mlx5_flow_destination dest = {};
1220 struct mlx5e_ttc_rule *rules;
1221 struct mlx5_flow_table *ft;
1222 int err;
1223 int tt;
1224
1225 ft = ttc->ft.t;
1226 rules = ttc->rules;
1227 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1228
1229 for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
1230 struct mlx5e_ttc_rule *rule = &rules[tt];
1231
1232 if (tt == MLX5E_TT_ANY)
1233 dest.tir_num = params->any_tt_tirn;
1234 else
1235 dest.tir_num = params->indir_tirn[tt];
1236
1237 rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
1238 ttc_rules[tt].etype,
1239 ttc_rules[tt].proto);
1240 if (IS_ERR(rule->rule)) {
1241 err = PTR_ERR(rule->rule);
1242 rule->rule = NULL;
1243 goto del_rules;
1244 }
1245 rule->default_dest = dest;
1246 }
1247
1248 return 0;
1249
1250 del_rules:
1251
1252 mlx5e_cleanup_ttc_rules(ttc);
1253 return err;
1254 }
1255
1256 static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
1257 {
1258 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1259 struct mlx5e_flow_table *ft = &ttc->ft;
1260 int ix = 0;
1261 u32 *in;
1262 int err;
1263 u8 *mc;
1264
1265 ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1266 if (!ft->g)
1267 return -ENOMEM;
1268 in = kvzalloc(inlen, GFP_KERNEL);
1269 if (!in) {
1270 kfree(ft->g);
1271 ft->g = NULL;
1272 return -ENOMEM;
1273 }
1274
1275 /* L4 Group */
1276 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1277 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1278 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
1279 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1280 MLX5_SET_CFG(in, start_flow_index, ix);
1281 ix += MLX5E_INNER_TTC_GROUP1_SIZE;
1282 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1283 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1284 if (IS_ERR(ft->g[ft->num_groups]))
1285 goto err;
1286 ft->num_groups++;
1287
1288 /* L3 Group */
1289 MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
1290 MLX5_SET_CFG(in, start_flow_index, ix);
1291 ix += MLX5E_INNER_TTC_GROUP2_SIZE;
1292 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1293 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1294 if (IS_ERR(ft->g[ft->num_groups]))
1295 goto err;
1296 ft->num_groups++;
1297
1298 /* Any Group */
1299 memset(in, 0, inlen);
1300 MLX5_SET_CFG(in, start_flow_index, ix);
1301 ix += MLX5E_INNER_TTC_GROUP3_SIZE;
1302 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1303 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1304 if (IS_ERR(ft->g[ft->num_groups]))
1305 goto err;
1306 ft->num_groups++;
1307
1308 kvfree(in);
1309 return 0;
1310
1311 err:
1312 err = PTR_ERR(ft->g[ft->num_groups]);
1313 ft->g[ft->num_groups] = NULL;
1314 kvfree(in);
1315
1316 return err;
1317 }
1318
1319 void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
1320 struct ttc_params *ttc_params)
1321 {
1322 ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
1323 ttc_params->inner_ttc = &priv->fs.inner_ttc;
1324 }
1325
1326 void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
1327 {
1328 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1329
1330 ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
1331 ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
1332 ft_attr->prio = MLX5E_NIC_PRIO;
1333 }
1334
1335 void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
1336
1337 {
1338 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1339
1340 ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
1341 ft_attr->level = MLX5E_TTC_FT_LEVEL;
1342 ft_attr->prio = MLX5E_NIC_PRIO;
1343 }
1344
1345 int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1346 struct mlx5e_ttc_table *ttc)
1347 {
1348 struct mlx5e_flow_table *ft = &ttc->ft;
1349 int err;
1350
1351 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
1352 return 0;
1353
1354 ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
1355 if (IS_ERR(ft->t)) {
1356 err = PTR_ERR(ft->t);
1357 ft->t = NULL;
1358 return err;
1359 }
1360
1361 err = mlx5e_create_inner_ttc_table_groups(ttc);
1362 if (err)
1363 goto err;
1364
1365 err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
1366 if (err)
1367 goto err;
1368
1369 return 0;
1370
1371 err:
1372 mlx5e_destroy_flow_table(ft);
1373 return err;
1374 }
1375
1376 void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
1377 struct mlx5e_ttc_table *ttc)
1378 {
1379 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
1380 return;
1381
1382 mlx5e_cleanup_ttc_rules(ttc);
1383 mlx5e_destroy_flow_table(&ttc->ft);
1384 }
1385
1386 void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
1387 struct mlx5e_ttc_table *ttc)
1388 {
1389 mlx5e_cleanup_ttc_rules(ttc);
1390 mlx5e_destroy_flow_table(&ttc->ft);
1391 }
1392
1393 int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1394 struct mlx5e_ttc_table *ttc)
1395 {
1396 bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
1397 struct mlx5e_flow_table *ft = &ttc->ft;
1398 int err;
1399
1400 ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
1401 if (IS_ERR(ft->t)) {
1402 err = PTR_ERR(ft->t);
1403 ft->t = NULL;
1404 return err;
1405 }
1406
1407 err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
1408 if (err)
1409 goto err;
1410
1411 err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
1412 if (err)
1413 goto err;
1414
1415 return 0;
1416 err:
1417 mlx5e_destroy_flow_table(ft);
1418 return err;
1419 }
1420
1421 int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
1422 struct mlx5_flow_destination *new_dest)
1423 {
1424 return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL);
1425 }
1426
1427 struct mlx5_flow_destination
1428 mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
1429 {
1430 struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest;
1431
1432 WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
1433 "TTC[%d] default dest is not setup yet", type);
1434
1435 return *dest;
1436 }
1437
1438 int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
1439 {
1440 struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type);
1441
1442 return mlx5e_ttc_fwd_dest(priv, type, &dest);
1443 }
1444
1445 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
1446 struct mlx5e_l2_rule *ai)
1447 {
1448 if (!IS_ERR_OR_NULL(ai->rule)) {
1449 mlx5_del_flow_rules(ai->rule);
1450 ai->rule = NULL;
1451 }
1452 }
1453
1454 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
1455 struct mlx5e_l2_rule *ai, int type)
1456 {
1457 struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
1458 struct mlx5_flow_destination dest = {};
1459 MLX5_DECLARE_FLOW_ACT(flow_act);
1460 struct mlx5_flow_spec *spec;
1461 int err = 0;
1462 u8 *mc_dmac;
1463 u8 *mv_dmac;
1464
1465 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1466 if (!spec)
1467 return -ENOMEM;
1468
1469 mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1470 outer_headers.dmac_47_16);
1471 mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1472 outer_headers.dmac_47_16);
1473
1474 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1475 dest.ft = priv->fs.ttc.ft.t;
1476
1477 switch (type) {
1478 case MLX5E_FULLMATCH:
1479 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1480 eth_broadcast_addr(mc_dmac);
1481 ether_addr_copy(mv_dmac, ai->addr);
1482 break;
1483
1484 case MLX5E_ALLMULTI:
1485 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1486 mc_dmac[0] = 0x01;
1487 mv_dmac[0] = 0x01;
1488 break;
1489 }
1490
1491 ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1492 if (IS_ERR(ai->rule)) {
1493 netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
1494 __func__, mv_dmac);
1495 err = PTR_ERR(ai->rule);
1496 ai->rule = NULL;
1497 }
1498
1499 kvfree(spec);
1500
1501 return err;
1502 }
1503
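/* L2 table layout: a large group for exact DMAC matches, a one-entry group for
 * the allmulti rule and a one-entry trap group at the end.
 */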
1504 #define MLX5E_NUM_L2_GROUPS 3
1505 #define MLX5E_L2_GROUP1_SIZE BIT(15)
1506 #define MLX5E_L2_GROUP2_SIZE BIT(0)
1507 #define MLX5E_L2_GROUP_TRAP_SIZE BIT(0) /* must be last */
1508 #define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
1509 MLX5E_L2_GROUP2_SIZE +\
1510 MLX5E_L2_GROUP_TRAP_SIZE)
1511 static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
1512 {
1513 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1514 struct mlx5e_flow_table *ft = &l2_table->ft;
1515 int ix = 0;
1516 u8 *mc_dmac;
1517 u32 *in;
1518 int err;
1519 u8 *mc;
1520
1521 ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1522 if (!ft->g)
1523 return -ENOMEM;
1524 in = kvzalloc(inlen, GFP_KERNEL);
1525 if (!in) {
1526 kfree(ft->g);
1527 return -ENOMEM;
1528 }
1529
1530 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1531 mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
1532 outer_headers.dmac_47_16);
1533 /* Flow Group for full match */
1534 eth_broadcast_addr(mc_dmac);
1535 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1536 MLX5_SET_CFG(in, start_flow_index, ix);
1537 ix += MLX5E_L2_GROUP1_SIZE;
1538 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1539 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1540 if (IS_ERR(ft->g[ft->num_groups]))
1541 goto err_destroy_groups;
1542 ft->num_groups++;
1543
1544 /* Flow Group for allmulti */
1545 eth_zero_addr(mc_dmac);
1546 mc_dmac[0] = 0x01;
1547 MLX5_SET_CFG(in, start_flow_index, ix);
1548 ix += MLX5E_L2_GROUP2_SIZE;
1549 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1550 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1551 if (IS_ERR(ft->g[ft->num_groups]))
1552 goto err_destroy_groups;
1553 ft->num_groups++;
1554
1555 /* Flow Group for l2 traps */
1556 memset(in, 0, inlen);
1557 MLX5_SET_CFG(in, start_flow_index, ix);
1558 ix += MLX5E_L2_GROUP_TRAP_SIZE;
1559 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1560 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1561 if (IS_ERR(ft->g[ft->num_groups]))
1562 goto err_destroy_groups;
1563 ft->num_groups++;
1564
1565 kvfree(in);
1566 return 0;
1567
1568 err_destroy_groups:
1569 err = PTR_ERR(ft->g[ft->num_groups]);
1570 ft->g[ft->num_groups] = NULL;
1571 mlx5e_destroy_groups(ft);
1572 kvfree(in);
1573 kfree(ft->g);
1574
1575 return err;
1576 }
1577
1578 static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
1579 {
1580 mlx5e_destroy_flow_table(&priv->fs.l2.ft);
1581 }
1582
1583 static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
1584 {
1585 struct mlx5e_l2_table *l2_table = &priv->fs.l2;
1586 struct mlx5e_flow_table *ft = &l2_table->ft;
1587 struct mlx5_flow_table_attr ft_attr = {};
1588 int err;
1589
1590 ft->num_groups = 0;
1591
1592 ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
1593 ft_attr.level = MLX5E_L2_FT_LEVEL;
1594 ft_attr.prio = MLX5E_NIC_PRIO;
1595
1596 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
1597 if (IS_ERR(ft->t)) {
1598 err = PTR_ERR(ft->t);
1599 ft->t = NULL;
1600 return err;
1601 }
1602
1603 err = mlx5e_create_l2_table_groups(l2_table);
1604 if (err)
1605 goto err_destroy_flow_table;
1606
1607 return 0;
1608
1609 err_destroy_flow_table:
1610 mlx5_destroy_flow_table(ft->t);
1611 ft->t = NULL;
1612
1613 return err;
1614 }
1615
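/* VLAN table layout: 4K-entry groups for C-tag and S-tag VID matches, a
 * two-entry group for the untagged/any-C-tag rules, a one-entry group for
 * any-S-tag and a trailing trap group.
 */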
1616 #define MLX5E_NUM_VLAN_GROUPS 5
1617 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
1618 #define MLX5E_VLAN_GROUP1_SIZE BIT(12)
1619 #define MLX5E_VLAN_GROUP2_SIZE BIT(1)
1620 #define MLX5E_VLAN_GROUP3_SIZE BIT(0)
1621 #define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
1622 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
1623 MLX5E_VLAN_GROUP1_SIZE +\
1624 MLX5E_VLAN_GROUP2_SIZE +\
1625 MLX5E_VLAN_GROUP3_SIZE +\
1626 MLX5E_VLAN_GROUP_TRAP_SIZE)
1627
1628 static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
1629 int inlen)
1630 {
1631 int err;
1632 int ix = 0;
1633 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1634
1635 memset(in, 0, inlen);
1636 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1637 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1638 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1639 MLX5_SET_CFG(in, start_flow_index, ix);
1640 ix += MLX5E_VLAN_GROUP0_SIZE;
1641 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1642 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1643 if (IS_ERR(ft->g[ft->num_groups]))
1644 goto err_destroy_groups;
1645 ft->num_groups++;
1646
1647 memset(in, 0, inlen);
1648 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1649 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1650 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1651 MLX5_SET_CFG(in, start_flow_index, ix);
1652 ix += MLX5E_VLAN_GROUP1_SIZE;
1653 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1654 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1655 if (IS_ERR(ft->g[ft->num_groups]))
1656 goto err_destroy_groups;
1657 ft->num_groups++;
1658
1659 memset(in, 0, inlen);
1660 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1661 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1662 MLX5_SET_CFG(in, start_flow_index, ix);
1663 ix += MLX5E_VLAN_GROUP2_SIZE;
1664 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1665 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1666 if (IS_ERR(ft->g[ft->num_groups]))
1667 goto err_destroy_groups;
1668 ft->num_groups++;
1669
1670 memset(in, 0, inlen);
1671 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1672 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1673 MLX5_SET_CFG(in, start_flow_index, ix);
1674 ix += MLX5E_VLAN_GROUP3_SIZE;
1675 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1676 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1677 if (IS_ERR(ft->g[ft->num_groups]))
1678 goto err_destroy_groups;
1679 ft->num_groups++;
1680
1681 memset(in, 0, inlen);
1682 MLX5_SET_CFG(in, start_flow_index, ix);
1683 ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
1684 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1685 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1686 if (IS_ERR(ft->g[ft->num_groups]))
1687 goto err_destroy_groups;
1688 ft->num_groups++;
1689
1690 return 0;
1691
1692 err_destroy_groups:
1693 err = PTR_ERR(ft->g[ft->num_groups]);
1694 ft->g[ft->num_groups] = NULL;
1695 mlx5e_destroy_groups(ft);
1696
1697 return err;
1698 }
1699
1700 static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
1701 {
1702 u32 *in;
1703 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1704 int err;
1705
1706 in = kvzalloc(inlen, GFP_KERNEL);
1707 if (!in)
1708 return -ENOMEM;
1709
1710 err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
1711
1712 kvfree(in);
1713 return err;
1714 }
1715
1716 static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
1717 {
1718 struct mlx5_flow_table_attr ft_attr = {};
1719 struct mlx5e_flow_table *ft;
1720 int err;
1721
1722 priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
1723 if (!priv->fs.vlan)
1724 return -ENOMEM;
1725
1726 ft = &priv->fs.vlan->ft;
1727 ft->num_groups = 0;
1728
1729 ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
1730 ft_attr.level = MLX5E_VLAN_FT_LEVEL;
1731 ft_attr.prio = MLX5E_NIC_PRIO;
1732
1733 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
1734 if (IS_ERR(ft->t)) {
1735 err = PTR_ERR(ft->t);
1736 goto err_free_t;
1737 }
1738
1739 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1740 if (!ft->g) {
1741 err = -ENOMEM;
1742 goto err_destroy_vlan_table;
1743 }
1744
1745 err = mlx5e_create_vlan_table_groups(ft);
1746 if (err)
1747 goto err_free_g;
1748
1749 mlx5e_add_vlan_rules(priv);
1750
1751 return 0;
1752
1753 err_free_g:
1754 kfree(ft->g);
1755 err_destroy_vlan_table:
1756 mlx5_destroy_flow_table(ft->t);
1757 err_free_t:
1758 kvfree(priv->fs.vlan);
1759 priv->fs.vlan = NULL;
1760
1761 return err;
1762 }
1763
1764 static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
1765 {
1766 mlx5e_del_vlan_rules(priv);
1767 mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
1768 kvfree(priv->fs.vlan);
1769 }
1770
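/* Create the RX steering chain bottom-up (aRFS, inner TTC, TTC, L2, VLAN, PTP)
 * so each table can point at the next one; received packets traverse it in the
 * opposite order: VLAN -> L2 -> TTC -> (inner TTC / aRFS) -> TIRs.
 */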
1771 int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
1772 {
1773 struct ttc_params ttc_params = {};
1774 int tt, err;
1775
1776 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
1777 MLX5_FLOW_NAMESPACE_KERNEL);
1778
1779 if (!priv->fs.ns)
1780 return -EOPNOTSUPP;
1781
1782 err = mlx5e_arfs_create_tables(priv);
1783 if (err) {
1784 netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
1785 err);
1786 priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
1787 }
1788
1789 mlx5e_set_ttc_basic_params(priv, &ttc_params);
1790 mlx5e_set_inner_ttc_ft_params(&ttc_params);
1791 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1792 ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
1793
1794 err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
1795 if (err) {
1796 netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
1797 err);
1798 goto err_destroy_arfs_tables;
1799 }
1800
1801 mlx5e_set_ttc_ft_params(&ttc_params);
1802 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1803 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
1804
1805 err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
1806 if (err) {
1807 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
1808 err);
1809 goto err_destroy_inner_ttc_table;
1810 }
1811
1812 err = mlx5e_create_l2_table(priv);
1813 if (err) {
1814 netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
1815 err);
1816 goto err_destroy_ttc_table;
1817 }
1818
1819 err = mlx5e_create_vlan_table(priv);
1820 if (err) {
1821 netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
1822 err);
1823 goto err_destroy_l2_table;
1824 }
1825
1826 err = mlx5e_ptp_alloc_rx_fs(priv);
1827 if (err)
1828 goto err_destroy_vlan_table;
1829
1830 mlx5e_ethtool_init_steering(priv);
1831
1832 return 0;
1833
1834 err_destroy_vlan_table:
1835 mlx5e_destroy_vlan_table(priv);
1836 err_destroy_l2_table:
1837 mlx5e_destroy_l2_table(priv);
1838 err_destroy_ttc_table:
1839 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1840 err_destroy_inner_ttc_table:
1841 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
1842 err_destroy_arfs_tables:
1843 mlx5e_arfs_destroy_tables(priv);
1844
1845 return err;
1846 }
1847
1848 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
1849 {
1850 mlx5e_ptp_free_rx_fs(priv);
1851 mlx5e_destroy_vlan_table(priv);
1852 mlx5e_destroy_l2_table(priv);
1853 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1854 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
1855 mlx5e_arfs_destroy_tables(priv);
1856 mlx5e_ethtool_cleanup_steering(priv);
1857 }
1858