1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
37
38 #include "mlx5_core.h"
39 #include "fs_core.h"
40 #include "fs_cmd.h"
41 #include "diag/fs_tracepoint.h"
42 #include "accel/ipsec.h"
43 #include "fpga/ipsec.h"
44
45 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
46 sizeof(struct init_tree_node))
47
48 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
49 ...) {.type = FS_TYPE_PRIO,\
50 .min_ft_level = min_level_val,\
51 .num_levels = num_levels_val,\
52 .num_leaf_prios = num_prios_val,\
53 .caps = caps_val,\
54 .children = (struct init_tree_node[]) {__VA_ARGS__},\
55 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
56 }
57
58 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
59 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
60 __VA_ARGS__)\
61
62 #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
63 .def_miss_action = def_miss_act,\
64 .children = (struct init_tree_node[]) {__VA_ARGS__},\
65 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
66 }
67
68 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
69 sizeof(long))
70
71 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
72
73 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
74 .caps = (long[]) {__VA_ARGS__} }
75
76 #define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
77 FS_CAP(flow_table_properties_nic_receive.modify_root), \
78 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
79 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
80
81 #define FS_CHAINING_CAPS_EGRESS \
82 FS_REQUIRED_CAPS( \
83 FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
84 FS_CAP(flow_table_properties_nic_transmit.modify_root), \
85 FS_CAP(flow_table_properties_nic_transmit \
86 .identified_miss_table_mode), \
87 FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
88
89 #define FS_CHAINING_CAPS_RDMA_TX \
90 FS_REQUIRED_CAPS( \
91 FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
92 FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
93 FS_CAP(flow_table_properties_nic_transmit_rdma \
94 .identified_miss_table_mode), \
95 FS_CAP(flow_table_properties_nic_transmit_rdma \
96 .flow_table_modify))
97
98 #define LEFTOVERS_NUM_LEVELS 1
99 #define LEFTOVERS_NUM_PRIOS 1
100
101 #define BY_PASS_PRIO_NUM_LEVELS 1
102 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
103 LEFTOVERS_NUM_PRIOS)
104
105 #define ETHTOOL_PRIO_NUM_LEVELS 1
106 #define ETHTOOL_NUM_PRIOS 11
107 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
108 /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
109 #define KERNEL_NIC_PRIO_NUM_LEVELS 7
110 #define KERNEL_NIC_NUM_PRIOS 1
111 /* One more level for tc */
112 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
113
114 #define KERNEL_NIC_TC_NUM_PRIOS 1
115 #define KERNEL_NIC_TC_NUM_LEVELS 2
116
117 #define ANCHOR_NUM_LEVELS 1
118 #define ANCHOR_NUM_PRIOS 1
119 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
120
121 #define OFFLOADS_MAX_FT 2
122 #define OFFLOADS_NUM_PRIOS 2
123 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
124
125 #define LAG_PRIO_NUM_LEVELS 1
126 #define LAG_NUM_PRIOS 1
127 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
128
129 #define KERNEL_TX_IPSEC_NUM_PRIOS 1
130 #define KERNEL_TX_IPSEC_NUM_LEVELS 1
131 #define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
132
133 struct node_caps {
134 size_t arr_sz;
135 long *caps;
136 };
137
138 static struct init_tree_node {
139 enum fs_node_type type;
140 struct init_tree_node *children;
141 int ar_size;
142 struct node_caps caps;
143 int min_ft_level;
144 int num_leaf_prios;
145 int prio;
146 int num_levels;
147 enum mlx5_flow_table_miss_action def_miss_action;
148 } root_fs = {
149 .type = FS_TYPE_NAMESPACE,
150 .ar_size = 7,
151 .children = (struct init_tree_node[]){
152 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
153 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
154 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
155 BY_PASS_PRIO_NUM_LEVELS))),
156 ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
157 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
158 ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
159 LAG_PRIO_NUM_LEVELS))),
160 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
161 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
162 ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
163 OFFLOADS_MAX_FT))),
164 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
165 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
166 ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
167 ETHTOOL_PRIO_NUM_LEVELS))),
168 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
169 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
170 ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
171 KERNEL_NIC_TC_NUM_LEVELS),
172 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
173 KERNEL_NIC_PRIO_NUM_LEVELS))),
174 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
175 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
176 ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
177 LEFTOVERS_NUM_LEVELS))),
178 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
179 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
180 ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
181 ANCHOR_NUM_LEVELS))),
182 }
183 };
184
185 static struct init_tree_node egress_root_fs = {
186 .type = FS_TYPE_NAMESPACE,
187 #ifdef CONFIG_MLX5_IPSEC
188 .ar_size = 2,
189 #else
190 .ar_size = 1,
191 #endif
192 .children = (struct init_tree_node[]) {
193 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
194 FS_CHAINING_CAPS_EGRESS,
195 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
196 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
197 BY_PASS_PRIO_NUM_LEVELS))),
198 #ifdef CONFIG_MLX5_IPSEC
199 ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
200 FS_CHAINING_CAPS_EGRESS,
201 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
202 ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
203 KERNEL_TX_IPSEC_NUM_LEVELS))),
204 #endif
205 }
206 };
207
208 #define RDMA_RX_BYPASS_PRIO 0
209 #define RDMA_RX_KERNEL_PRIO 1
210 static struct init_tree_node rdma_rx_root_fs = {
211 .type = FS_TYPE_NAMESPACE,
212 .ar_size = 2,
213 .children = (struct init_tree_node[]) {
214 [RDMA_RX_BYPASS_PRIO] =
215 ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
216 FS_CHAINING_CAPS,
217 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
218 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
219 BY_PASS_PRIO_NUM_LEVELS))),
220 [RDMA_RX_KERNEL_PRIO] =
221 ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
222 FS_CHAINING_CAPS,
223 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
224 ADD_MULTIPLE_PRIO(1, 1))),
225 }
226 };
227
228 static struct init_tree_node rdma_tx_root_fs = {
229 .type = FS_TYPE_NAMESPACE,
230 .ar_size = 1,
231 .children = (struct init_tree_node[]) {
232 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
233 FS_CHAINING_CAPS_RDMA_TX,
234 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
235 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
236 BY_PASS_PRIO_NUM_LEVELS))),
237 }
238 };
239
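/* Lockdep classes for the nested node rwsems: the flow table
 * (grandparent), flow group (parent) and FTE (child) locks are
 * taken in that order.
 */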
240 enum fs_i_lock_class {
241 FS_LOCK_GRANDPARENT,
242 FS_LOCK_PARENT,
243 FS_LOCK_CHILD
244 };
245
246 static const struct rhashtable_params rhash_fte = {
247 .key_len = sizeof_field(struct fs_fte, val),
248 .key_offset = offsetof(struct fs_fte, val),
249 .head_offset = offsetof(struct fs_fte, hash),
250 .automatic_shrinking = true,
251 .min_size = 1,
252 };
253
254 static const struct rhashtable_params rhash_fg = {
255 .key_len = sizeof_field(struct mlx5_flow_group, mask),
256 .key_offset = offsetof(struct mlx5_flow_group, mask),
257 .head_offset = offsetof(struct mlx5_flow_group, hash),
258 .automatic_shrinking = true,
259 .min_size = 1,
260
261 };
262
263 static void del_hw_flow_table(struct fs_node *node);
264 static void del_hw_flow_group(struct fs_node *node);
265 static void del_hw_fte(struct fs_node *node);
266 static void del_sw_flow_table(struct fs_node *node);
267 static void del_sw_flow_group(struct fs_node *node);
268 static void del_sw_fte(struct fs_node *node);
269 static void del_sw_prio(struct fs_node *node);
270 static void del_sw_ns(struct fs_node *node);
271 /* Deleting a rule (destination) is a special case that
272 * requires locking the FTE for the whole deletion process.
273 */
274 static void del_sw_hw_rule(struct fs_node *node);
275 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
276 struct mlx5_flow_destination *d2);
277 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
278 static struct mlx5_flow_rule *
279 find_flow_rule(struct fs_fte *fte,
280 struct mlx5_flow_destination *dest);
281
282 static void tree_init_node(struct fs_node *node,
283 void (*del_hw_func)(struct fs_node *),
284 void (*del_sw_func)(struct fs_node *))
285 {
286 refcount_set(&node->refcount, 1);
287 INIT_LIST_HEAD(&node->list);
288 INIT_LIST_HEAD(&node->children);
289 init_rwsem(&node->lock);
290 node->del_hw_func = del_hw_func;
291 node->del_sw_func = del_sw_func;
292 node->active = false;
293 }
294
295 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
296 {
297 if (parent)
298 refcount_inc(&parent->refcount);
299 node->parent = parent;
300
301 /* Parent is the root */
302 if (!parent)
303 node->root = node;
304 else
305 node->root = parent->root;
306 }
307
308 static int tree_get_node(struct fs_node *node)
309 {
310 return refcount_inc_not_zero(&node->refcount);
311 }
312
313 static void nested_down_read_ref_node(struct fs_node *node,
314 enum fs_i_lock_class class)
315 {
316 if (node) {
317 down_read_nested(&node->lock, class);
318 refcount_inc(&node->refcount);
319 }
320 }
321
322 static void nested_down_write_ref_node(struct fs_node *node,
323 enum fs_i_lock_class class)
324 {
325 if (node) {
326 down_write_nested(&node->lock, class);
327 refcount_inc(&node->refcount);
328 }
329 }
330
331 static void down_write_ref_node(struct fs_node *node, bool locked)
332 {
333 if (node) {
334 if (!locked)
335 down_write(&node->lock);
336 refcount_inc(&node->refcount);
337 }
338 }
339
340 static void up_read_ref_node(struct fs_node *node)
341 {
342 refcount_dec(&node->refcount);
343 up_read(&node->lock);
344 }
345
346 static void up_write_ref_node(struct fs_node *node, bool locked)
347 {
348 refcount_dec(&node->refcount);
349 if (!locked)
350 up_write(&node->lock);
351 }
352
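/* Drop a reference. When the last reference goes away, destroy the
 * node's HW object, unlink it from its parent, free the SW object and
 * then release the reference held on the parent.
 */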
353 static void tree_put_node(struct fs_node *node, bool locked)
354 {
355 struct fs_node *parent_node = node->parent;
356
357 if (refcount_dec_and_test(&node->refcount)) {
358 if (node->del_hw_func)
359 node->del_hw_func(node);
360 if (parent_node) {
361 down_write_ref_node(parent_node, locked);
362 list_del_init(&node->list);
363 }
364 node->del_sw_func(node);
365 if (parent_node)
366 up_write_ref_node(parent_node, locked);
367 node = NULL;
368 }
369 if (!node && parent_node)
370 tree_put_node(parent_node, locked);
371 }
372
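/* Remove the node only if this is its last reference; otherwise just
 * drop the caller's reference and return -EEXIST.
 */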
373 static int tree_remove_node(struct fs_node *node, bool locked)
374 {
375 if (refcount_read(&node->refcount) > 1) {
376 refcount_dec(&node->refcount);
377 return -EEXIST;
378 }
379 tree_put_node(node, locked);
380 return 0;
381 }
382
383 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
384 unsigned int prio)
385 {
386 struct fs_prio *iter_prio;
387
388 fs_for_each_prio(iter_prio, ns) {
389 if (iter_prio->prio == prio)
390 return iter_prio;
391 }
392
393 return NULL;
394 }
395
396 static bool is_fwd_next_action(u32 action)
397 {
398 return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
399 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
400 }
401
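/* A spec is valid only when every bit set in the match value is also
 * set in the match criteria (the mask).
 */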
402 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
403 {
404 int i;
405
406 for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
407 if (spec->match_value[i] & ~spec->match_criteria[i]) {
408 pr_warn("mlx5_core: match_value differs from match_criteria\n");
409 return false;
410 }
411
412 return true;
413 }
414
415 static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
416 {
417 struct fs_node *root;
418 struct mlx5_flow_namespace *ns;
419
420 root = node->root;
421
422 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
423 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
424 return NULL;
425 }
426
427 ns = container_of(root, struct mlx5_flow_namespace, node);
428 return container_of(ns, struct mlx5_flow_root_namespace, ns);
429 }
430
431 static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
432 {
433 struct mlx5_flow_root_namespace *root = find_root(node);
434
435 if (root)
436 return root->dev->priv.steering;
437 return NULL;
438 }
439
440 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
441 {
442 struct mlx5_flow_root_namespace *root = find_root(node);
443
444 if (root)
445 return root->dev;
446 return NULL;
447 }
448
449 static void del_sw_ns(struct fs_node *node)
450 {
451 kfree(node);
452 }
453
454 static void del_sw_prio(struct fs_node *node)
455 {
456 kfree(node);
457 }
458
459 static void del_hw_flow_table(struct fs_node *node)
460 {
461 struct mlx5_flow_root_namespace *root;
462 struct mlx5_flow_table *ft;
463 struct mlx5_core_dev *dev;
464 int err;
465
466 fs_get_obj(ft, node);
467 dev = get_dev(&ft->node);
468 root = find_root(&ft->node);
469 trace_mlx5_fs_del_ft(ft);
470
471 if (node->active) {
472 err = root->cmds->destroy_flow_table(root, ft);
473 if (err)
474 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
475 }
476 }
477
478 static void del_sw_flow_table(struct fs_node *node)
479 {
480 struct mlx5_flow_table *ft;
481 struct fs_prio *prio;
482
483 fs_get_obj(ft, node);
484
485 rhltable_destroy(&ft->fgs_hash);
486 if (ft->node.parent) {
487 fs_get_obj(prio, ft->node.parent);
488 prio->num_ft--;
489 }
490 kfree(ft);
491 }
492
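/* Push the FTE's accumulated modify_mask to the FW and clear it. */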
493 static void modify_fte(struct fs_fte *fte)
494 {
495 struct mlx5_flow_root_namespace *root;
496 struct mlx5_flow_table *ft;
497 struct mlx5_flow_group *fg;
498 struct mlx5_core_dev *dev;
499 int err;
500
501 fs_get_obj(fg, fte->node.parent);
502 fs_get_obj(ft, fg->node.parent);
503 dev = get_dev(&fte->node);
504
505 root = find_root(&ft->node);
506 err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
507 if (err)
508 mlx5_core_warn(dev,
509 "%s can't del rule fg id=%d fte_index=%d\n",
510 __func__, fg->id, fte->index);
511 fte->modify_mask = 0;
512 }
513
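/* Unlink one rule (destination) from its FTE. If other destinations
 * remain, record the required change in fte->modify_mask so that
 * modify_fte() can update the HW FTE accordingly.
 */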
514 static void del_sw_hw_rule(struct fs_node *node)
515 {
516 struct mlx5_flow_rule *rule;
517 struct fs_fte *fte;
518
519 fs_get_obj(rule, node);
520 fs_get_obj(fte, rule->node.parent);
521 trace_mlx5_fs_del_rule(rule);
522 if (is_fwd_next_action(rule->sw_action)) {
523 mutex_lock(&rule->dest_attr.ft->lock);
524 list_del(&rule->next_ft);
525 mutex_unlock(&rule->dest_attr.ft->lock);
526 }
527
528 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
529 --fte->dests_size) {
530 fte->modify_mask |=
531 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
532 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
533 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
534 goto out;
535 }
536
537 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
538 --fte->dests_size) {
539 fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
540 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
541 goto out;
542 }
543
544 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
545 --fte->dests_size) {
546 fte->modify_mask |=
547 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
548 }
549 out:
550 kfree(rule);
551 }
552
553 static void del_hw_fte(struct fs_node *node)
554 {
555 struct mlx5_flow_root_namespace *root;
556 struct mlx5_flow_table *ft;
557 struct mlx5_flow_group *fg;
558 struct mlx5_core_dev *dev;
559 struct fs_fte *fte;
560 int err;
561
562 fs_get_obj(fte, node);
563 fs_get_obj(fg, fte->node.parent);
564 fs_get_obj(ft, fg->node.parent);
565
566 trace_mlx5_fs_del_fte(fte);
567 dev = get_dev(&ft->node);
568 root = find_root(&ft->node);
569 if (node->active) {
570 err = root->cmds->delete_fte(root, ft, fte);
571 if (err)
572 mlx5_core_warn(dev,
573 "flow steering can't delete fte in index %d of flow group id %d\n",
574 fte->index, fg->id);
575 node->active = false;
576 }
577 }
578
579 static void del_sw_fte(struct fs_node *node)
580 {
581 struct mlx5_flow_steering *steering = get_steering(node);
582 struct mlx5_flow_group *fg;
583 struct fs_fte *fte;
584 int err;
585
586 fs_get_obj(fte, node);
587 fs_get_obj(fg, fte->node.parent);
588
589 err = rhashtable_remove_fast(&fg->ftes_hash,
590 &fte->hash,
591 rhash_fte);
592 WARN_ON(err);
593 ida_free(&fg->fte_allocator, fte->index - fg->start_index);
594 kmem_cache_free(steering->ftes_cache, fte);
595 }
596
597 static void del_hw_flow_group(struct fs_node *node)
598 {
599 struct mlx5_flow_root_namespace *root;
600 struct mlx5_flow_group *fg;
601 struct mlx5_flow_table *ft;
602 struct mlx5_core_dev *dev;
603
604 fs_get_obj(fg, node);
605 fs_get_obj(ft, fg->node.parent);
606 dev = get_dev(&ft->node);
607 trace_mlx5_fs_del_fg(fg);
608
609 root = find_root(&ft->node);
610 if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
611 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
612 fg->id, ft->id);
613 }
614
615 static void del_sw_flow_group(struct fs_node *node)
616 {
617 struct mlx5_flow_steering *steering = get_steering(node);
618 struct mlx5_flow_group *fg;
619 struct mlx5_flow_table *ft;
620 int err;
621
622 fs_get_obj(fg, node);
623 fs_get_obj(ft, fg->node.parent);
624
625 rhashtable_destroy(&fg->ftes_hash);
626 ida_destroy(&fg->fte_allocator);
627 if (ft->autogroup.active &&
628 fg->max_ftes == ft->autogroup.group_size &&
629 fg->start_index < ft->autogroup.max_fte)
630 ft->autogroup.num_groups--;
631 err = rhltable_remove(&ft->fgs_hash,
632 &fg->hash,
633 rhash_fg);
634 WARN_ON(err);
635 kmem_cache_free(steering->fgs_cache, fg);
636 }
637
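/* Allocate an index for the FTE inside the group and add it to the
 * group's hash table and children list.
 */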
638 static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
639 {
640 int index;
641 int ret;
642
643 index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
644 if (index < 0)
645 return index;
646
647 fte->index = index + fg->start_index;
648 ret = rhashtable_insert_fast(&fg->ftes_hash,
649 &fte->hash,
650 rhash_fte);
651 if (ret)
652 goto err_ida_remove;
653
654 tree_add_node(&fte->node, &fg->node);
655 list_add_tail(&fte->node.list, &fg->node.children);
656 return 0;
657
658 err_ida_remove:
659 ida_free(&fg->fte_allocator, index);
660 return ret;
661 }
662
663 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
664 const struct mlx5_flow_spec *spec,
665 struct mlx5_flow_act *flow_act)
666 {
667 struct mlx5_flow_steering *steering = get_steering(&ft->node);
668 struct fs_fte *fte;
669
670 fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
671 if (!fte)
672 return ERR_PTR(-ENOMEM);
673
674 memcpy(fte->val, &spec->match_value, sizeof(fte->val));
675 fte->node.type = FS_TYPE_FLOW_ENTRY;
676 fte->action = *flow_act;
677 fte->flow_context = spec->flow_context;
678
679 tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
680
681 return fte;
682 }
683
684 static void dealloc_flow_group(struct mlx5_flow_steering *steering,
685 struct mlx5_flow_group *fg)
686 {
687 rhashtable_destroy(&fg->ftes_hash);
688 kmem_cache_free(steering->fgs_cache, fg);
689 }
690
691 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
692 u8 match_criteria_enable,
693 const void *match_criteria,
694 int start_index,
695 int end_index)
696 {
697 struct mlx5_flow_group *fg;
698 int ret;
699
700 fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
701 if (!fg)
702 return ERR_PTR(-ENOMEM);
703
704 ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
705 if (ret) {
706 kmem_cache_free(steering->fgs_cache, fg);
707 return ERR_PTR(ret);
708 }
709
710 ida_init(&fg->fte_allocator);
711 fg->mask.match_criteria_enable = match_criteria_enable;
712 memcpy(&fg->mask.match_criteria, match_criteria,
713 sizeof(fg->mask.match_criteria));
714 fg->node.type = FS_TYPE_FLOW_GROUP;
715 fg->start_index = start_index;
716 fg->max_ftes = end_index - start_index + 1;
717
718 return fg;
719 }
720
721 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
722 u8 match_criteria_enable,
723 const void *match_criteria,
724 int start_index,
725 int end_index,
726 struct list_head *prev)
727 {
728 struct mlx5_flow_steering *steering = get_steering(&ft->node);
729 struct mlx5_flow_group *fg;
730 int ret;
731
732 fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
733 start_index, end_index);
734 if (IS_ERR(fg))
735 return fg;
736
737 /* initialize refcnt, add to parent list */
738 ret = rhltable_insert(&ft->fgs_hash,
739 &fg->hash,
740 rhash_fg);
741 if (ret) {
742 dealloc_flow_group(steering, fg);
743 return ERR_PTR(ret);
744 }
745
746 tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
747 tree_add_node(&fg->node, &ft->node);
748 /* Add node to group list */
749 list_add(&fg->node.list, prev);
750 atomic_inc(&ft->node.version);
751
752 return fg;
753 }
754
755 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
756 enum fs_flow_table_type table_type,
757 enum fs_flow_table_op_mod op_mod,
758 u32 flags)
759 {
760 struct mlx5_flow_table *ft;
761 int ret;
762
763 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
764 if (!ft)
765 return ERR_PTR(-ENOMEM);
766
767 ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
768 if (ret) {
769 kfree(ft);
770 return ERR_PTR(ret);
771 }
772
773 ft->level = level;
774 ft->node.type = FS_TYPE_FLOW_TABLE;
775 ft->op_mod = op_mod;
776 ft->type = table_type;
777 ft->vport = vport;
778 ft->max_fte = max_fte;
779 ft->flags = flags;
780 INIT_LIST_HEAD(&ft->fwd_rules);
781 mutex_init(&ft->lock);
782
783 return ft;
784 }
785
786 /* If reverse is false, then we search for the first flow table in the
787 * root sub-tree from start (closest from right); else we search for the
788 * last flow table in the root sub-tree up to start (closest from left).
789 */
790 static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
791 struct list_head *start,
792 bool reverse)
793 {
794 #define list_advance_entry(pos, reverse) \
795 ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
796
797 #define list_for_each_advance_continue(pos, head, reverse) \
798 for (pos = list_advance_entry(pos, reverse); \
799 &pos->list != (head); \
800 pos = list_advance_entry(pos, reverse))
801
802 struct fs_node *iter = list_entry(start, struct fs_node, list);
803 struct mlx5_flow_table *ft = NULL;
804
805 if (!root || root->type == FS_TYPE_PRIO_CHAINS)
806 return NULL;
807
808 list_for_each_advance_continue(iter, &root->children, reverse) {
809 if (iter->type == FS_TYPE_FLOW_TABLE) {
810 fs_get_obj(ft, iter);
811 return ft;
812 }
813 ft = find_closest_ft_recursive(iter, &iter->children, reverse);
814 if (ft)
815 return ft;
816 }
817
818 return ft;
819 }
820
821 /* If reverse is false then return the first flow table in next priority of
822 * prio in the tree, else return the last flow table in the previous priority
823 * of prio in the tree.
824 */
825 static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
826 {
827 struct mlx5_flow_table *ft = NULL;
828 struct fs_node *curr_node;
829 struct fs_node *parent;
830
831 parent = prio->node.parent;
832 curr_node = &prio->node;
833 while (!ft && parent) {
834 ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
835 curr_node = parent;
836 parent = curr_node->parent;
837 }
838 return ft;
839 }
840
841 /* Assuming the whole tree is locked by the chain lock mutex */
842 static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
843 {
844 return find_closest_ft(prio, false);
845 }
846
847 /* Assuming the whole tree is locked by the chain lock mutex */
848 static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
849 {
850 return find_closest_ft(prio, true);
851 }
852
853 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
854 struct mlx5_flow_act *flow_act)
855 {
856 struct fs_prio *prio;
857 bool next_ns;
858
859 next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
860 fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
861
862 return find_next_chained_ft(prio);
863 }
864
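/* Point every flow table in prio at ft as its next (miss) table. */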
865 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
866 struct fs_prio *prio,
867 struct mlx5_flow_table *ft)
868 {
869 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
870 struct mlx5_flow_table *iter;
871 int err;
872
873 fs_for_each_ft(iter, prio) {
874 err = root->cmds->modify_flow_table(root, iter, ft);
875 if (err) {
876 mlx5_core_err(dev,
877 "Failed to modify flow table id %d, type %d, err %d\n",
878 iter->id, iter->type, err);
879 /* The driver is out of sync with the FW */
880 return err;
881 }
882 }
883 return 0;
884 }
885
886 /* Connect flow tables from previous priority of prio to ft */
887 static int connect_prev_fts(struct mlx5_core_dev *dev,
888 struct mlx5_flow_table *ft,
889 struct fs_prio *prio)
890 {
891 struct mlx5_flow_table *prev_ft;
892
893 prev_ft = find_prev_chained_ft(prio);
894 if (prev_ft) {
895 struct fs_prio *prev_prio;
896
897 fs_get_obj(prev_prio, prev_ft->node.parent);
898 return connect_fts_in_prio(dev, prev_prio, ft);
899 }
900 return 0;
901 }
902
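/* If ft is now the lowest-level flow table in the namespace, make it
 * the new root flow table in HW (once per underlay QPN, if any are
 * configured).
 */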
903 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
904 *prio)
905 {
906 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
907 struct mlx5_ft_underlay_qp *uqp;
908 int min_level = INT_MAX;
909 int err = 0;
910 u32 qpn;
911
912 if (root->root_ft)
913 min_level = root->root_ft->level;
914
915 if (ft->level >= min_level)
916 return 0;
917
918 if (list_empty(&root->underlay_qpns)) {
919 /* Don't set any QPN (zero) in case QPN list is empty */
920 qpn = 0;
921 err = root->cmds->update_root_ft(root, ft, qpn, false);
922 } else {
923 list_for_each_entry(uqp, &root->underlay_qpns, list) {
924 qpn = uqp->qpn;
925 err = root->cmds->update_root_ft(root, ft,
926 qpn, false);
927 if (err)
928 break;
929 }
930 }
931
932 if (err)
933 mlx5_core_warn(root->dev,
934 "Update root flow table of id(%u) qpn(%d) failed\n",
935 ft->id, qpn);
936 else
937 root->root_ft = ft;
938
939 return err;
940 }
941
942 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
943 struct mlx5_flow_destination *dest)
944 {
945 struct mlx5_flow_root_namespace *root;
946 struct mlx5_flow_table *ft;
947 struct mlx5_flow_group *fg;
948 struct fs_fte *fte;
949 int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
950 int err = 0;
951
952 fs_get_obj(fte, rule->node.parent);
953 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
954 return -EINVAL;
955 down_write_ref_node(&fte->node, false);
956 fs_get_obj(fg, fte->node.parent);
957 fs_get_obj(ft, fg->node.parent);
958
959 memcpy(&rule->dest_attr, dest, sizeof(*dest));
960 root = find_root(&ft->node);
961 err = root->cmds->update_fte(root, ft, fg,
962 modify_mask, fte);
963 up_write_ref_node(&fte->node, false);
964
965 return err;
966 }
967
968 int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
969 struct mlx5_flow_destination *new_dest,
970 struct mlx5_flow_destination *old_dest)
971 {
972 int i;
973
974 if (!old_dest) {
975 if (handle->num_rules != 1)
976 return -EINVAL;
977 return _mlx5_modify_rule_destination(handle->rule[0],
978 new_dest);
979 }
980
981 for (i = 0; i < handle->num_rules; i++) {
982 if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
983 return _mlx5_modify_rule_destination(handle->rule[i],
984 new_dest);
985 }
986
987 return -EINVAL;
988 }
989
990 /* Modify/set FWD rules that point at old_next_ft to point at new_next_ft */
991 static int connect_fwd_rules(struct mlx5_core_dev *dev,
992 struct mlx5_flow_table *new_next_ft,
993 struct mlx5_flow_table *old_next_ft)
994 {
995 struct mlx5_flow_destination dest = {};
996 struct mlx5_flow_rule *iter;
997 int err = 0;
998
999 /* new_next_ft and old_next_ft could be NULL only
1000 * when we create/destroy the anchor flow table.
1001 */
1002 if (!new_next_ft || !old_next_ft)
1003 return 0;
1004
1005 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1006 dest.ft = new_next_ft;
1007
1008 mutex_lock(&old_next_ft->lock);
1009 list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
1010 mutex_unlock(&old_next_ft->lock);
1011 list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1012 if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1013 iter->ft->ns == new_next_ft->ns)
1014 continue;
1015
1016 err = _mlx5_modify_rule_destination(iter, &dest);
1017 if (err)
1018 pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
1019 new_next_ft->id);
1020 }
1021 return 0;
1022 }
1023
1024 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
1025 struct fs_prio *prio)
1026 {
1027 struct mlx5_flow_table *next_ft;
1028 int err = 0;
1029
1030 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
1031
1032 if (list_empty(&prio->node.children)) {
1033 err = connect_prev_fts(dev, ft, prio);
1034 if (err)
1035 return err;
1036
1037 next_ft = find_next_chained_ft(prio);
1038 err = connect_fwd_rules(dev, ft, next_ft);
1039 if (err)
1040 return err;
1041 }
1042
1043 if (MLX5_CAP_FLOWTABLE(dev,
1044 flow_table_properties_nic_receive.modify_root))
1045 err = update_root_ft_create(ft, prio);
1046 return err;
1047 }
1048
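/* Insert ft into the priority's children list, keeping the list sorted
 * by level.
 */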
1049 static void list_add_flow_table(struct mlx5_flow_table *ft,
1050 struct fs_prio *prio)
1051 {
1052 struct list_head *prev = &prio->node.children;
1053 struct mlx5_flow_table *iter;
1054
1055 fs_for_each_ft(iter, prio) {
1056 if (iter->level > ft->level)
1057 break;
1058 prev = &iter->node.list;
1059 }
1060 list_add(&ft->node.list, prev);
1061 }
1062
1063 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1064 struct mlx5_flow_table_attr *ft_attr,
1065 enum fs_flow_table_op_mod op_mod,
1066 u16 vport)
1067 {
1068 struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1069 bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1070 struct mlx5_flow_table *next_ft;
1071 struct fs_prio *fs_prio = NULL;
1072 struct mlx5_flow_table *ft;
1073 int log_table_sz;
1074 int err;
1075
1076 if (!root) {
1077 pr_err("mlx5: flow steering failed to find root of namespace\n");
1078 return ERR_PTR(-ENODEV);
1079 }
1080
1081 mutex_lock(&root->chain_lock);
1082 fs_prio = find_prio(ns, ft_attr->prio);
1083 if (!fs_prio) {
1084 err = -EINVAL;
1085 goto unlock_root;
1086 }
1087 if (!unmanaged) {
1088 /* The level is related to the
1089 * priority level range.
1090 */
1091 if (ft_attr->level >= fs_prio->num_levels) {
1092 err = -ENOSPC;
1093 goto unlock_root;
1094 }
1095
1096 ft_attr->level += fs_prio->start_level;
1097 }
1098
1099 /* The level is related to the
1100 * priority level range.
1101 */
1102 ft = alloc_flow_table(ft_attr->level,
1103 vport,
1104 ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
1105 root->table_type,
1106 op_mod, ft_attr->flags);
1107 if (IS_ERR(ft)) {
1108 err = PTR_ERR(ft);
1109 goto unlock_root;
1110 }
1111
1112 tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
1113 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
1114 next_ft = unmanaged ? ft_attr->next_ft :
1115 find_next_chained_ft(fs_prio);
1116 ft->def_miss_action = ns->def_miss_action;
1117 ft->ns = ns;
1118 err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
1119 if (err)
1120 goto free_ft;
1121
1122 if (!unmanaged) {
1123 err = connect_flow_table(root->dev, ft, fs_prio);
1124 if (err)
1125 goto destroy_ft;
1126 }
1127
1128 ft->node.active = true;
1129 down_write_ref_node(&fs_prio->node, false);
1130 if (!unmanaged) {
1131 tree_add_node(&ft->node, &fs_prio->node);
1132 list_add_flow_table(ft, fs_prio);
1133 } else {
1134 ft->node.root = fs_prio->node.root;
1135 }
1136 fs_prio->num_ft++;
1137 up_write_ref_node(&fs_prio->node, false);
1138 mutex_unlock(&root->chain_lock);
1139 trace_mlx5_fs_add_ft(ft);
1140 return ft;
1141 destroy_ft:
1142 root->cmds->destroy_flow_table(root, ft);
1143 free_ft:
1144 rhltable_destroy(&ft->fgs_hash);
1145 kfree(ft);
1146 unlock_root:
1147 mutex_unlock(&root->chain_lock);
1148 return ERR_PTR(err);
1149 }
1150
1151 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1152 struct mlx5_flow_table_attr *ft_attr)
1153 {
1154 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1155 }
1156 EXPORT_SYMBOL(mlx5_create_flow_table);
1157
1158 struct mlx5_flow_table *
1159 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1160 struct mlx5_flow_table_attr *ft_attr, u16 vport)
1161 {
1162 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1163 }
1164
1165 struct mlx5_flow_table*
1166 mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1167 int prio, u32 level)
1168 {
1169 struct mlx5_flow_table_attr ft_attr = {};
1170
1171 ft_attr.level = level;
1172 ft_attr.prio = prio;
1173 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
1174 }
1175 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1176
1177 struct mlx5_flow_table*
1178 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1179 struct mlx5_flow_table_attr *ft_attr)
1180 {
1181 int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1182 int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
1183 int max_num_groups = ft_attr->autogroup.max_num_groups;
1184 struct mlx5_flow_table *ft;
1185
1186 if (max_num_groups > autogroups_max_fte)
1187 return ERR_PTR(-EINVAL);
1188 if (num_reserved_entries > ft_attr->max_fte)
1189 return ERR_PTR(-EINVAL);
1190
1191 ft = mlx5_create_flow_table(ns, ft_attr);
1192 if (IS_ERR(ft))
1193 return ft;
1194
1195 ft->autogroup.active = true;
1196 ft->autogroup.required_groups = max_num_groups;
1197 ft->autogroup.max_fte = autogroups_max_fte;
1198 /* We reserve room for flow groups in addition to max types */
1199 ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
1200
1201 return ft;
1202 }
1203 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
1204
1205 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1206 u32 *fg_in)
1207 {
1208 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1209 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1210 fg_in, match_criteria);
1211 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
1212 fg_in,
1213 match_criteria_enable);
1214 int start_index = MLX5_GET(create_flow_group_in, fg_in,
1215 start_flow_index);
1216 int end_index = MLX5_GET(create_flow_group_in, fg_in,
1217 end_flow_index);
1218 struct mlx5_flow_group *fg;
1219 int err;
1220
1221 if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
1222 return ERR_PTR(-EPERM);
1223
1224 down_write_ref_node(&ft->node, false);
1225 fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
1226 start_index, end_index,
1227 ft->node.children.prev);
1228 up_write_ref_node(&ft->node, false);
1229 if (IS_ERR(fg))
1230 return fg;
1231
1232 err = root->cmds->create_flow_group(root, ft, fg_in, fg);
1233 if (err) {
1234 tree_put_node(&fg->node, false);
1235 return ERR_PTR(err);
1236 }
1237 trace_mlx5_fs_add_fg(fg);
1238 fg->node.active = true;
1239
1240 return fg;
1241 }
1242 EXPORT_SYMBOL(mlx5_create_flow_group);
1243
1244 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1245 {
1246 struct mlx5_flow_rule *rule;
1247
1248 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1249 if (!rule)
1250 return NULL;
1251
1252 INIT_LIST_HEAD(&rule->next_ft);
1253 rule->node.type = FS_TYPE_FLOW_DEST;
1254 if (dest)
1255 memcpy(&rule->dest_attr, dest, sizeof(*dest));
1256
1257 return rule;
1258 }
1259
1260 static struct mlx5_flow_handle *alloc_handle(int num_rules)
1261 {
1262 struct mlx5_flow_handle *handle;
1263
1264 handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
1265 if (!handle)
1266 return NULL;
1267
1268 handle->num_rules = num_rules;
1269
1270 return handle;
1271 }
1272
1273 static void destroy_flow_handle(struct fs_fte *fte,
1274 struct mlx5_flow_handle *handle,
1275 struct mlx5_flow_destination *dest,
1276 int i)
1277 {
1278 for (; --i >= 0;) {
1279 if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
1280 fte->dests_size--;
1281 list_del(&handle->rule[i]->node.list);
1282 kfree(handle->rule[i]);
1283 }
1284 }
1285 kfree(handle);
1286 }
1287
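/* Build a flow handle for the given destinations. Existing rules on the
 * FTE are reused when an identical destination is already present;
 * *new_rule is set when at least one new rule had to be allocated.
 */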
1288 static struct mlx5_flow_handle *
1289 create_flow_handle(struct fs_fte *fte,
1290 struct mlx5_flow_destination *dest,
1291 int dest_num,
1292 int *modify_mask,
1293 bool *new_rule)
1294 {
1295 struct mlx5_flow_handle *handle;
1296 struct mlx5_flow_rule *rule = NULL;
1297 static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1298 static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1299 int type;
1300 int i = 0;
1301
1302 handle = alloc_handle((dest_num) ? dest_num : 1);
1303 if (!handle)
1304 return ERR_PTR(-ENOMEM);
1305
1306 do {
1307 if (dest) {
1308 rule = find_flow_rule(fte, dest + i);
1309 if (rule) {
1310 refcount_inc(&rule->node.refcount);
1311 goto rule_found;
1312 }
1313 }
1314
1315 *new_rule = true;
1316 rule = alloc_rule(dest + i);
1317 if (!rule)
1318 goto free_rules;
1319
1320 /* Add dest to the dests list - flow tables must be at the
1321 * end of the list so that forward-to-next-prio rules work.
1322 */
1323 tree_init_node(&rule->node, NULL, del_sw_hw_rule);
1324 if (dest &&
1325 dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1326 list_add(&rule->node.list, &fte->node.children);
1327 else
1328 list_add_tail(&rule->node.list, &fte->node.children);
1329 if (dest) {
1330 fte->dests_size++;
1331
1332 type = dest[i].type ==
1333 MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1334 *modify_mask |= type ? count : dst;
1335 }
1336 rule_found:
1337 handle->rule[i] = rule;
1338 } while (++i < dest_num);
1339
1340 return handle;
1341
1342 free_rules:
1343 destroy_flow_handle(fte, handle, dest, i);
1344 return ERR_PTR(-ENOMEM);
1345 }
1346
1347 /* fte should not be deleted while calling this function */
1348 static struct mlx5_flow_handle *
1349 add_rule_fte(struct fs_fte *fte,
1350 struct mlx5_flow_group *fg,
1351 struct mlx5_flow_destination *dest,
1352 int dest_num,
1353 bool update_action)
1354 {
1355 struct mlx5_flow_root_namespace *root;
1356 struct mlx5_flow_handle *handle;
1357 struct mlx5_flow_table *ft;
1358 int modify_mask = 0;
1359 int err;
1360 bool new_rule = false;
1361
1362 handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1363 &new_rule);
1364 if (IS_ERR(handle) || !new_rule)
1365 goto out;
1366
1367 if (update_action)
1368 modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1369
1370 fs_get_obj(ft, fg->node.parent);
1371 root = find_root(&fg->node);
1372 if (!(fte->status & FS_FTE_STATUS_EXISTING))
1373 err = root->cmds->create_fte(root, ft, fg, fte);
1374 else
1375 err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
1376 if (err)
1377 goto free_handle;
1378
1379 fte->node.active = true;
1380 fte->status |= FS_FTE_STATUS_EXISTING;
1381 atomic_inc(&fg->node.version);
1382
1383 out:
1384 return handle;
1385
1386 free_handle:
1387 destroy_flow_handle(fte, handle, dest, handle->num_rules);
1388 return ERR_PTR(err);
1389 }
1390
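/* Find a free range of FTE indexes in an autogrouped table and allocate
 * a new flow group there for the spec's match criteria.
 */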
1391 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
1392 const struct mlx5_flow_spec *spec)
1393 {
1394 struct list_head *prev = &ft->node.children;
1395 u32 max_fte = ft->autogroup.max_fte;
1396 unsigned int candidate_index = 0;
1397 unsigned int group_size = 0;
1398 struct mlx5_flow_group *fg;
1399
1400 if (!ft->autogroup.active)
1401 return ERR_PTR(-ENOENT);
1402
1403 if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1404 group_size = ft->autogroup.group_size;
1405
1406 /* max_fte == ft->autogroup.max_types */
1407 if (group_size == 0)
1408 group_size = 1;
1409
1410 /* sorted by start_index */
1411 fs_for_each_fg(fg, ft) {
1412 if (candidate_index + group_size > fg->start_index)
1413 candidate_index = fg->start_index + fg->max_ftes;
1414 else
1415 break;
1416 prev = &fg->node.list;
1417 }
1418
1419 if (candidate_index + group_size > max_fte)
1420 return ERR_PTR(-ENOSPC);
1421
1422 fg = alloc_insert_flow_group(ft,
1423 spec->match_criteria_enable,
1424 spec->match_criteria,
1425 candidate_index,
1426 candidate_index + group_size - 1,
1427 prev);
1428 if (IS_ERR(fg))
1429 goto out;
1430
1431 if (group_size == ft->autogroup.group_size)
1432 ft->autogroup.num_groups++;
1433
1434 out:
1435 return fg;
1436 }
1437
1438 static int create_auto_flow_group(struct mlx5_flow_table *ft,
1439 struct mlx5_flow_group *fg)
1440 {
1441 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1442 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1443 void *match_criteria_addr;
1444 u8 src_esw_owner_mask_on;
1445 void *misc;
1446 int err;
1447 u32 *in;
1448
1449 in = kvzalloc(inlen, GFP_KERNEL);
1450 if (!in)
1451 return -ENOMEM;
1452
1453 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1454 fg->mask.match_criteria_enable);
1455 MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
1456 MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
1457 fg->max_ftes - 1);
1458
1459 misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
1460 misc_parameters);
1461 src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
1462 source_eswitch_owner_vhca_id);
1463 MLX5_SET(create_flow_group_in, in,
1464 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
1465
1466 match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1467 in, match_criteria);
1468 memcpy(match_criteria_addr, fg->mask.match_criteria,
1469 sizeof(fg->mask.match_criteria));
1470
1471 err = root->cmds->create_flow_group(root, ft, in, fg);
1472 if (!err) {
1473 fg->node.active = true;
1474 trace_mlx5_fs_add_fg(fg);
1475 }
1476
1477 kvfree(in);
1478 return err;
1479 }
1480
1481 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1482 struct mlx5_flow_destination *d2)
1483 {
1484 if (d1->type == d2->type) {
1485 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1486 d1->vport.num == d2->vport.num &&
1487 d1->vport.flags == d2->vport.flags &&
1488 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1489 (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1490 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1491 (d1->vport.pkt_reformat->id ==
1492 d2->vport.pkt_reformat->id) : true)) ||
1493 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1494 d1->ft == d2->ft) ||
1495 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1496 d1->tir_num == d2->tir_num) ||
1497 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1498 d1->ft_num == d2->ft_num))
1499 return true;
1500 }
1501
1502 return false;
1503 }
1504
1505 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1506 struct mlx5_flow_destination *dest)
1507 {
1508 struct mlx5_flow_rule *rule;
1509
1510 list_for_each_entry(rule, &fte->node.children, node.list) {
1511 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1512 return rule;
1513 }
1514 return NULL;
1515 }
1516
1517 static bool check_conflicting_actions(u32 action1, u32 action2)
1518 {
1519 u32 xored_actions = action1 ^ action2;
1520
1521 /* if one rule only wants to count, it's ok */
1522 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1523 action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1524 return false;
1525
1526 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1527 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1528 MLX5_FLOW_CONTEXT_ACTION_DECAP |
1529 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1530 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1531 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1532 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1533 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1534 return true;
1535
1536 return false;
1537 }
1538
1539 static int check_conflicting_ftes(struct fs_fte *fte,
1540 const struct mlx5_flow_context *flow_context,
1541 const struct mlx5_flow_act *flow_act)
1542 {
1543 if (check_conflicting_actions(flow_act->action, fte->action.action)) {
1544 mlx5_core_warn(get_dev(&fte->node),
1545 "Found two FTEs with conflicting actions\n");
1546 return -EEXIST;
1547 }
1548
1549 if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1550 fte->flow_context.flow_tag != flow_context->flow_tag) {
1551 mlx5_core_warn(get_dev(&fte->node),
1552 "FTE flow tag %u already exists with different flow tag %u\n",
1553 fte->flow_context.flow_tag,
1554 flow_context->flow_tag);
1555 return -EEXIST;
1556 }
1557
1558 return 0;
1559 }
1560
1561 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1562 const struct mlx5_flow_spec *spec,
1563 struct mlx5_flow_act *flow_act,
1564 struct mlx5_flow_destination *dest,
1565 int dest_num,
1566 struct fs_fte *fte)
1567 {
1568 struct mlx5_flow_handle *handle;
1569 int old_action;
1570 int i;
1571 int ret;
1572
1573 ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1574 if (ret)
1575 return ERR_PTR(ret);
1576
1577 old_action = fte->action.action;
1578 fte->action.action |= flow_act->action;
1579 handle = add_rule_fte(fte, fg, dest, dest_num,
1580 old_action != flow_act->action);
1581 if (IS_ERR(handle)) {
1582 fte->action.action = old_action;
1583 return handle;
1584 }
1585 trace_mlx5_fs_set_fte(fte, false);
1586
1587 for (i = 0; i < handle->num_rules; i++) {
1588 if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
1589 tree_add_node(&handle->rule[i]->node, &fte->node);
1590 trace_mlx5_fs_add_rule(handle->rule[i]);
1591 }
1592 }
1593 return handle;
1594 }
1595
1596 static bool counter_is_valid(u32 action)
1597 {
1598 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1599 MLX5_FLOW_CONTEXT_ACTION_ALLOW |
1600 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1601 }
1602
1603 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1604 struct mlx5_flow_act *flow_act,
1605 struct mlx5_flow_table *ft)
1606 {
1607 bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1608 u32 action = flow_act->action;
1609
1610 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1611 return counter_is_valid(action);
1612
1613 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1614 return true;
1615
1616 if (ignore_level) {
1617 if (ft->type != FS_FT_FDB &&
1618 ft->type != FS_FT_NIC_RX)
1619 return false;
1620
1621 if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1622 ft->type != dest->ft->type)
1623 return false;
1624 }
1625
1626 if (!dest || ((dest->type ==
1627 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1628 (dest->ft->level <= ft->level && !ignore_level)))
1629 return false;
1630 return true;
1631 }
1632
1633 struct match_list {
1634 struct list_head list;
1635 struct mlx5_flow_group *g;
1636 };
1637
1638 static void free_match_list(struct match_list *head, bool ft_locked)
1639 {
1640 struct match_list *iter, *match_tmp;
1641
1642 list_for_each_entry_safe(iter, match_tmp, &head->list,
1643 list) {
1644 tree_put_node(&iter->g->node, ft_locked);
1645 list_del(&iter->list);
1646 kfree(iter);
1647 }
1648 }
1649
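/* Collect, under RCU, all flow groups whose match criteria match the
 * spec; each group in the list is returned with a reference held.
 */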
1650 static int build_match_list(struct match_list *match_head,
1651 struct mlx5_flow_table *ft,
1652 const struct mlx5_flow_spec *spec,
1653 bool ft_locked)
1654 {
1655 struct rhlist_head *tmp, *list;
1656 struct mlx5_flow_group *g;
1657 int err = 0;
1658
1659 rcu_read_lock();
1660 INIT_LIST_HEAD(&match_head->list);
1661 /* Collect all fgs that have a matching match_criteria */
1662 list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1663 /* RCU is atomic, we can't execute FW commands here */
1664 rhl_for_each_entry_rcu(g, tmp, list, hash) {
1665 struct match_list *curr_match;
1666
1667 if (unlikely(!tree_get_node(&g->node)))
1668 continue;
1669
1670 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1671 if (!curr_match) {
1672 free_match_list(match_head, ft_locked);
1673 err = -ENOMEM;
1674 goto out;
1675 }
1676 curr_match->g = g;
1677 list_add_tail(&curr_match->list, &match_head->list);
1678 }
1679 out:
1680 rcu_read_unlock();
1681 return err;
1682 }
1683
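/* Sum the version counters of all matched flow groups; used to detect
 * concurrent FTE insertions between lookups.
 */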
1684 static u64 matched_fgs_get_version(struct list_head *match_head)
1685 {
1686 struct match_list *iter;
1687 u64 version = 0;
1688
1689 list_for_each_entry(iter, match_head, list)
1690 version += (u64)atomic_read(&iter->g->node.version);
1691 return version;
1692 }
1693
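/* Look up an FTE by match value inside the group. On success the FTE is
 * returned with a reference held and its node write-locked; the group
 * lock is released before returning.
 */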
1694 static struct fs_fte *
1695 lookup_fte_locked(struct mlx5_flow_group *g,
1696 const u32 *match_value,
1697 bool take_write)
1698 {
1699 struct fs_fte *fte_tmp;
1700
1701 if (take_write)
1702 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1703 else
1704 nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1705 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1706 rhash_fte);
1707 if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1708 fte_tmp = NULL;
1709 goto out;
1710 }
1711 if (!fte_tmp->node.active) {
1712 tree_put_node(&fte_tmp->node, false);
1713 fte_tmp = NULL;
1714 goto out;
1715 }
1716
1717 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1718 out:
1719 if (take_write)
1720 up_write_ref_node(&g->node, false);
1721 else
1722 up_read_ref_node(&g->node);
1723 return fte_tmp;
1724 }
1725
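/* Try to add the rule to one of the existing flow groups in match_head:
 * first look for an FTE with an identical match value to extend, then
 * try to allocate a new FTE in one of the matching groups. Returns
 * -EAGAIN if the table version changed and the caller should retry.
 */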
1726 static struct mlx5_flow_handle *
1727 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1728 struct list_head *match_head,
1729 const struct mlx5_flow_spec *spec,
1730 struct mlx5_flow_act *flow_act,
1731 struct mlx5_flow_destination *dest,
1732 int dest_num,
1733 int ft_version)
1734 {
1735 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1736 struct mlx5_flow_group *g;
1737 struct mlx5_flow_handle *rule;
1738 struct match_list *iter;
1739 bool take_write = false;
1740 struct fs_fte *fte;
1741 u64 version = 0;
1742 int err;
1743
1744 fte = alloc_fte(ft, spec, flow_act);
1745 if (IS_ERR(fte))
1746 return ERR_PTR(-ENOMEM);
1747
1748 search_again_locked:
1749 if (flow_act->flags & FLOW_ACT_NO_APPEND)
1750 goto skip_search;
1751 version = matched_fgs_get_version(match_head);
1752 /* Try to find an fte with an identical match value and attempt to
1753 * update its action.
1754 */
1755 list_for_each_entry(iter, match_head, list) {
1756 struct fs_fte *fte_tmp;
1757
1758 g = iter->g;
1759 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1760 if (!fte_tmp)
1761 continue;
1762 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1763 /* No error check needed here, because insert_fte() is not called */
1764 up_write_ref_node(&fte_tmp->node, false);
1765 tree_put_node(&fte_tmp->node, false);
1766 kmem_cache_free(steering->ftes_cache, fte);
1767 return rule;
1768 }
1769
1770 skip_search:
1771 /* No group with matching fte found, or we skipped the search.
1772 * Try to add a new fte to any matching fg.
1773 */
1774
1775 	/* Check the ft version, in case a new flow group
1776 	 * was added while the fgs weren't locked.
1777 */
1778 if (atomic_read(&ft->node.version) != ft_version) {
1779 rule = ERR_PTR(-EAGAIN);
1780 goto out;
1781 }
1782
1783 	/* Check the fgs version. If the version has changed, it could be that an
1784 * FTE with the same match value was added while the fgs weren't
1785 * locked.
1786 */
1787 if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1788 version != matched_fgs_get_version(match_head)) {
1789 take_write = true;
1790 goto search_again_locked;
1791 }
1792
1793 list_for_each_entry(iter, match_head, list) {
1794 g = iter->g;
1795
1796 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1797
1798 if (!g->node.active) {
1799 up_write_ref_node(&g->node, false);
1800 continue;
1801 }
1802
1803 err = insert_fte(g, fte);
1804 if (err) {
1805 up_write_ref_node(&g->node, false);
1806 if (err == -ENOSPC)
1807 continue;
1808 kmem_cache_free(steering->ftes_cache, fte);
1809 return ERR_PTR(err);
1810 }
1811
1812 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1813 up_write_ref_node(&g->node, false);
1814 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1815 up_write_ref_node(&fte->node, false);
1816 if (IS_ERR(rule))
1817 tree_put_node(&fte->node, false);
1818 return rule;
1819 }
1820 rule = ERR_PTR(-ENOENT);
1821 out:
1822 kmem_cache_free(steering->ftes_cache, fte);
1823 return rule;
1824 }
1825
1826 static struct mlx5_flow_handle *
1827 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1828 const struct mlx5_flow_spec *spec,
1829 struct mlx5_flow_act *flow_act,
1830 struct mlx5_flow_destination *dest,
1831 int dest_num)
1832
1833 {
1834 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1835 struct mlx5_flow_handle *rule;
1836 struct match_list match_head;
1837 struct mlx5_flow_group *g;
1838 bool take_write = false;
1839 struct fs_fte *fte;
1840 int version;
1841 int err;
1842 int i;
1843
1844 if (!check_valid_spec(spec))
1845 return ERR_PTR(-EINVAL);
1846
1847 for (i = 0; i < dest_num; i++) {
1848 if (!dest_is_valid(&dest[i], flow_act, ft))
1849 return ERR_PTR(-EINVAL);
1850 }
1851 nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1852 search_again_locked:
1853 version = atomic_read(&ft->node.version);
1854
1855 	/* Collect all fgs which have a matching match_criteria */
1856 err = build_match_list(&match_head, ft, spec, take_write);
1857 if (err) {
1858 if (take_write)
1859 up_write_ref_node(&ft->node, false);
1860 else
1861 up_read_ref_node(&ft->node);
1862 return ERR_PTR(err);
1863 }
1864
1865 if (!take_write)
1866 up_read_ref_node(&ft->node);
1867
1868 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1869 dest_num, version);
1870 free_match_list(&match_head, take_write);
1871 if (!IS_ERR(rule) ||
1872 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1873 if (take_write)
1874 up_write_ref_node(&ft->node, false);
1875 return rule;
1876 }
1877
1878 if (!take_write) {
1879 nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1880 take_write = true;
1881 }
1882
1883 if (PTR_ERR(rule) == -EAGAIN ||
1884 version != atomic_read(&ft->node.version))
1885 goto search_again_locked;
1886
1887 g = alloc_auto_flow_group(ft, spec);
1888 if (IS_ERR(g)) {
1889 rule = ERR_CAST(g);
1890 up_write_ref_node(&ft->node, false);
1891 return rule;
1892 }
1893
1894 fte = alloc_fte(ft, spec, flow_act);
1895 if (IS_ERR(fte)) {
1896 up_write_ref_node(&ft->node, false);
1897 err = PTR_ERR(fte);
1898 goto err_alloc_fte;
1899 }
1900
1901 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1902 up_write_ref_node(&ft->node, false);
1903
1904 err = create_auto_flow_group(ft, g);
1905 if (err)
1906 goto err_release_fg;
1907
1908 err = insert_fte(g, fte);
1909 if (err)
1910 goto err_release_fg;
1911
1912 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1913 up_write_ref_node(&g->node, false);
1914 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1915 up_write_ref_node(&fte->node, false);
1916 if (IS_ERR(rule))
1917 tree_put_node(&fte->node, false);
1918 tree_put_node(&g->node, false);
1919 return rule;
1920
1921 err_release_fg:
1922 up_write_ref_node(&g->node, false);
1923 kmem_cache_free(steering->ftes_cache, fte);
1924 err_alloc_fte:
1925 tree_put_node(&g->node, false);
1926 return ERR_PTR(err);
1927 }
1928
1929 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1930 {
1931 return ((ft->type == FS_FT_NIC_RX) &&
1932 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1933 }
1934
1935 struct mlx5_flow_handle *
1936 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1937 const struct mlx5_flow_spec *spec,
1938 struct mlx5_flow_act *flow_act,
1939 struct mlx5_flow_destination *dest,
1940 int num_dest)
1941 {
1942 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1943 static const struct mlx5_flow_spec zero_spec = {};
1944 struct mlx5_flow_destination *gen_dest = NULL;
1945 struct mlx5_flow_table *next_ft = NULL;
1946 struct mlx5_flow_handle *handle = NULL;
1947 u32 sw_action = flow_act->action;
1948 int i;
1949
1950 if (!spec)
1951 spec = &zero_spec;
1952
1953 if (!is_fwd_next_action(sw_action))
1954 return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
1955
1956 if (!fwd_next_prio_supported(ft))
1957 return ERR_PTR(-EOPNOTSUPP);
1958
1959 mutex_lock(&root->chain_lock);
1960 next_ft = find_next_fwd_ft(ft, flow_act);
1961 if (!next_ft) {
1962 handle = ERR_PTR(-EOPNOTSUPP);
1963 goto unlock;
1964 }
1965
1966 gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
1967 GFP_KERNEL);
1968 if (!gen_dest) {
1969 handle = ERR_PTR(-ENOMEM);
1970 goto unlock;
1971 }
1972 for (i = 0; i < num_dest; i++)
1973 gen_dest[i] = dest[i];
1974 gen_dest[i].type =
1975 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1976 gen_dest[i].ft = next_ft;
1977 dest = gen_dest;
1978 num_dest++;
1979 flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
1980 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
1981 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1982 handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
1983 if (IS_ERR(handle))
1984 goto unlock;
1985
1986 if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
1987 mutex_lock(&next_ft->lock);
1988 list_add(&handle->rule[num_dest - 1]->next_ft,
1989 &next_ft->fwd_rules);
1990 mutex_unlock(&next_ft->lock);
1991 handle->rule[num_dest - 1]->sw_action = sw_action;
1992 handle->rule[num_dest - 1]->ft = ft;
1993 }
1994 unlock:
1995 mutex_unlock(&root->chain_lock);
1996 kfree(gen_dest);
1997 return handle;
1998 }
1999 EXPORT_SYMBOL(mlx5_add_flow_rules);
2000
2001 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2002 {
2003 struct fs_fte *fte;
2004 int i;
2005
2006 	/* In order to consolidate the HW changes we lock the FTE against other
2007 	 * changes and increase its refcount, so that the "del" functions of
2008 	 * the FTE are not invoked; we handle them here instead.
2009 	 * The removal of the rules is done under the locked FTE.
2010 	 * After removing all the handle's rules, if there are remaining
2011 	 * rules, it means we just need to modify the FTE in FW, and then
2012 	 * unlock it and decrease the refcount we increased before.
2013 	 * Otherwise, it means the FTE should be deleted. First delete the
2014 	 * FTE in FW. Then unlock the FTE and proceed with tree_put_node of
2015 	 * the FTE, which will handle the last decrease of the refcount, as
2016 	 * well as the required handling of its parent.
2017 	 */
2018 fs_get_obj(fte, handle->rule[0]->node.parent);
2019 down_write_ref_node(&fte->node, false);
2020 for (i = handle->num_rules - 1; i >= 0; i--)
2021 tree_remove_node(&handle->rule[i]->node, true);
2022 if (fte->dests_size) {
2023 if (fte->modify_mask)
2024 modify_fte(fte);
2025 up_write_ref_node(&fte->node, false);
2026 } else if (list_empty(&fte->node.children)) {
2027 del_hw_fte(&fte->node);
2028 /* Avoid double call to del_hw_fte */
2029 fte->node.del_hw_func = NULL;
2030 up_write_ref_node(&fte->node, false);
2031 tree_put_node(&fte->node, false);
2032 }
2033 kfree(handle);
2034 }
2035 EXPORT_SYMBOL(mlx5_del_flow_rules);
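/*
 * Usage sketch (illustrative only, not part of this file's logic): a minimal
 * example of how a caller might add a rule that forwards matching packets to
 * another flow table and later remove it.  The tables "ft" and "next_ft" and
 * the match-criteria setup are assumed to exist in the caller; only the calls
 * to mlx5_add_flow_rules()/mlx5_del_flow_rules() are taken from this file.
 *
 *	struct mlx5_flow_destination dest = {};
 *	struct mlx5_flow_act flow_act = {};
 *	struct mlx5_flow_handle *handle;
 *	struct mlx5_flow_spec *spec;
 *
 *	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *	if (!spec)
 *		return -ENOMEM;
 *	(fill spec->match_criteria_enable/match_criteria/match_value here)
 *
 *	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 *	dest.ft = next_ft;
 *
 *	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	kvfree(spec);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	mlx5_del_flow_rules(handle);
 */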
2036
2037 /* Assuming prio->node.children (flow tables) is sorted by level */
2038 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2039 {
2040 struct fs_prio *prio;
2041
2042 fs_get_obj(prio, ft->node.parent);
2043
2044 if (!list_is_last(&ft->node.list, &prio->node.children))
2045 return list_next_entry(ft, node.list);
2046 return find_next_chained_ft(prio);
2047 }
2048
2049 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2050 {
2051 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2052 struct mlx5_ft_underlay_qp *uqp;
2053 struct mlx5_flow_table *new_root_ft = NULL;
2054 int err = 0;
2055 u32 qpn;
2056
2057 if (root->root_ft != ft)
2058 return 0;
2059
2060 new_root_ft = find_next_ft(ft);
2061 if (!new_root_ft) {
2062 root->root_ft = NULL;
2063 return 0;
2064 }
2065
2066 if (list_empty(&root->underlay_qpns)) {
2067 /* Don't set any QPN (zero) in case QPN list is empty */
2068 qpn = 0;
2069 err = root->cmds->update_root_ft(root, new_root_ft,
2070 qpn, false);
2071 } else {
2072 list_for_each_entry(uqp, &root->underlay_qpns, list) {
2073 qpn = uqp->qpn;
2074 err = root->cmds->update_root_ft(root,
2075 new_root_ft, qpn,
2076 false);
2077 if (err)
2078 break;
2079 }
2080 }
2081
2082 if (err)
2083 mlx5_core_warn(root->dev,
2084 "Update root flow table of id(%u) qpn(%d) failed\n",
2085 ft->id, qpn);
2086 else
2087 root->root_ft = new_root_ft;
2088
2089 return 0;
2090 }
2091
2092 /* Connect flow table from previous priority to
2093 * the next flow table.
2094 */
2095 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2096 {
2097 struct mlx5_core_dev *dev = get_dev(&ft->node);
2098 struct mlx5_flow_table *next_ft;
2099 struct fs_prio *prio;
2100 int err = 0;
2101
2102 err = update_root_ft_destroy(ft);
2103 if (err)
2104 return err;
2105
2106 fs_get_obj(prio, ft->node.parent);
2107 if (!(list_first_entry(&prio->node.children,
2108 struct mlx5_flow_table,
2109 node.list) == ft))
2110 return 0;
2111
2112 next_ft = find_next_chained_ft(prio);
2113 err = connect_fwd_rules(dev, next_ft, ft);
2114 if (err)
2115 return err;
2116
2117 err = connect_prev_fts(dev, next_ft, prio);
2118 if (err)
2119 mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2120 ft->id);
2121 return err;
2122 }
2123
2124 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2125 {
2126 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2127 int err = 0;
2128
2129 mutex_lock(&root->chain_lock);
2130 if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2131 err = disconnect_flow_table(ft);
2132 if (err) {
2133 mutex_unlock(&root->chain_lock);
2134 return err;
2135 }
2136 if (tree_remove_node(&ft->node, false))
2137 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2138 ft->id);
2139 mutex_unlock(&root->chain_lock);
2140
2141 return err;
2142 }
2143 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2144
2145 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2146 {
2147 if (tree_remove_node(&fg->node, false))
2148 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2149 fg->id);
2150 }
2151 EXPORT_SYMBOL(mlx5_destroy_flow_group);
2152
2153 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2154 int n)
2155 {
2156 struct mlx5_flow_steering *steering = dev->priv.steering;
2157
2158 if (!steering || !steering->fdb_sub_ns)
2159 return NULL;
2160
2161 return steering->fdb_sub_ns[n];
2162 }
2163 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2164
2165 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2166 enum mlx5_flow_namespace_type type)
2167 {
2168 struct mlx5_flow_steering *steering = dev->priv.steering;
2169 struct mlx5_flow_root_namespace *root_ns;
2170 int prio = 0;
2171 struct fs_prio *fs_prio;
2172 struct mlx5_flow_namespace *ns;
2173
2174 if (!steering)
2175 return NULL;
2176
2177 switch (type) {
2178 case MLX5_FLOW_NAMESPACE_FDB:
2179 if (steering->fdb_root_ns)
2180 return &steering->fdb_root_ns->ns;
2181 return NULL;
2182 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2183 if (steering->sniffer_rx_root_ns)
2184 return &steering->sniffer_rx_root_ns->ns;
2185 return NULL;
2186 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2187 if (steering->sniffer_tx_root_ns)
2188 return &steering->sniffer_tx_root_ns->ns;
2189 return NULL;
2190 default:
2191 break;
2192 }
2193
2194 if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
2195 type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
2196 root_ns = steering->egress_root_ns;
2197 prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2198 } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
2199 root_ns = steering->rdma_rx_root_ns;
2200 prio = RDMA_RX_BYPASS_PRIO;
2201 } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
2202 root_ns = steering->rdma_rx_root_ns;
2203 prio = RDMA_RX_KERNEL_PRIO;
2204 } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
2205 root_ns = steering->rdma_tx_root_ns;
2206 } else { /* Must be NIC RX */
2207 root_ns = steering->root_ns;
2208 prio = type;
2209 }
2210
2211 if (!root_ns)
2212 return NULL;
2213
2214 fs_prio = find_prio(&root_ns->ns, prio);
2215 if (!fs_prio)
2216 return NULL;
2217
2218 ns = list_first_entry(&fs_prio->node.children,
2219 typeof(*ns),
2220 node.list);
2221
2222 return ns;
2223 }
2224 EXPORT_SYMBOL(mlx5_get_flow_namespace);
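/*
 * Usage sketch (illustrative): how a caller might resolve a namespace and
 * create a flow table in it.  The namespace type and the attribute values
 * (prio/level/max_fte) below are arbitrary placeholders chosen for the
 * example; mlx5_create_flow_table() and mlx5_destroy_flow_table() are the
 * APIs used elsewhere in this file.
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *
 *	ft_attr.prio = 0;
 *	ft_attr.level = 0;
 *	ft_attr.max_fte = 128;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *	...
 *	mlx5_destroy_flow_table(ft);
 */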
2225
2226 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2227 enum mlx5_flow_namespace_type type,
2228 int vport)
2229 {
2230 struct mlx5_flow_steering *steering = dev->priv.steering;
2231
2232 if (!steering)
2233 return NULL;
2234
2235 switch (type) {
2236 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2237 if (vport >= steering->esw_egress_acl_vports)
2238 return NULL;
2239 if (steering->esw_egress_root_ns &&
2240 steering->esw_egress_root_ns[vport])
2241 return &steering->esw_egress_root_ns[vport]->ns;
2242 else
2243 return NULL;
2244 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2245 if (vport >= steering->esw_ingress_acl_vports)
2246 return NULL;
2247 if (steering->esw_ingress_root_ns &&
2248 steering->esw_ingress_root_ns[vport])
2249 return &steering->esw_ingress_root_ns[vport]->ns;
2250 else
2251 return NULL;
2252 default:
2253 return NULL;
2254 }
2255 }
2256
2257 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2258 unsigned int prio,
2259 int num_levels,
2260 enum fs_node_type type)
2261 {
2262 struct fs_prio *fs_prio;
2263
2264 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2265 if (!fs_prio)
2266 return ERR_PTR(-ENOMEM);
2267
2268 fs_prio->node.type = type;
2269 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2270 tree_add_node(&fs_prio->node, &ns->node);
2271 fs_prio->num_levels = num_levels;
2272 fs_prio->prio = prio;
2273 list_add_tail(&fs_prio->node.list, &ns->node.children);
2274
2275 return fs_prio;
2276 }
2277
2278 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2279 unsigned int prio,
2280 int num_levels)
2281 {
2282 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2283 }
2284
2285 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2286 unsigned int prio, int num_levels)
2287 {
2288 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2289 }
2290
2291 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2292 *ns)
2293 {
2294 ns->node.type = FS_TYPE_NAMESPACE;
2295
2296 return ns;
2297 }
2298
2299 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2300 int def_miss_act)
2301 {
2302 struct mlx5_flow_namespace *ns;
2303
2304 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2305 if (!ns)
2306 return ERR_PTR(-ENOMEM);
2307
2308 fs_init_namespace(ns);
2309 ns->def_miss_action = def_miss_act;
2310 tree_init_node(&ns->node, NULL, del_sw_ns);
2311 tree_add_node(&ns->node, &prio->node);
2312 list_add_tail(&ns->node.list, &prio->node.children);
2313
2314 return ns;
2315 }
2316
2317 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2318 struct init_tree_node *prio_metadata)
2319 {
2320 struct fs_prio *fs_prio;
2321 int i;
2322
2323 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2324 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2325 if (IS_ERR(fs_prio))
2326 return PTR_ERR(fs_prio);
2327 }
2328 return 0;
2329 }
2330
2331 #define FLOW_TABLE_BIT_SZ 1
2332 #define GET_FLOW_TABLE_CAP(dev, offset) \
2333 ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
2334 offset / 32)) >> \
2335 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2336 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2337 {
2338 int i;
2339
2340 for (i = 0; i < caps->arr_sz; i++) {
2341 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2342 return false;
2343 }
2344 return true;
2345 }
2346
2347 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2348 struct init_tree_node *init_node,
2349 struct fs_node *fs_parent_node,
2350 struct init_tree_node *init_parent_node,
2351 int prio)
2352 {
2353 int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2354 flow_table_properties_nic_receive.
2355 max_ft_level);
2356 struct mlx5_flow_namespace *fs_ns;
2357 struct fs_prio *fs_prio;
2358 struct fs_node *base;
2359 int i;
2360 int err;
2361
2362 if (init_node->type == FS_TYPE_PRIO) {
2363 if ((init_node->min_ft_level > max_ft_level) ||
2364 !has_required_caps(steering->dev, &init_node->caps))
2365 return 0;
2366
2367 fs_get_obj(fs_ns, fs_parent_node);
2368 if (init_node->num_leaf_prios)
2369 return create_leaf_prios(fs_ns, prio, init_node);
2370 fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2371 if (IS_ERR(fs_prio))
2372 return PTR_ERR(fs_prio);
2373 base = &fs_prio->node;
2374 } else if (init_node->type == FS_TYPE_NAMESPACE) {
2375 fs_get_obj(fs_prio, fs_parent_node);
2376 fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2377 if (IS_ERR(fs_ns))
2378 return PTR_ERR(fs_ns);
2379 base = &fs_ns->node;
2380 } else {
2381 return -EINVAL;
2382 }
2383 prio = 0;
2384 for (i = 0; i < init_node->ar_size; i++) {
2385 err = init_root_tree_recursive(steering, &init_node->children[i],
2386 base, init_node, prio);
2387 if (err)
2388 return err;
2389 if (init_node->children[i].type == FS_TYPE_PRIO &&
2390 init_node->children[i].num_leaf_prios) {
2391 prio += init_node->children[i].num_leaf_prios;
2392 }
2393 }
2394
2395 return 0;
2396 }
2397
2398 static int init_root_tree(struct mlx5_flow_steering *steering,
2399 struct init_tree_node *init_node,
2400 struct fs_node *fs_parent_node)
2401 {
2402 int err;
2403 int i;
2404
2405 for (i = 0; i < init_node->ar_size; i++) {
2406 err = init_root_tree_recursive(steering, &init_node->children[i],
2407 fs_parent_node,
2408 init_node, i);
2409 if (err)
2410 return err;
2411 }
2412 return 0;
2413 }
2414
2415 static void del_sw_root_ns(struct fs_node *node)
2416 {
2417 struct mlx5_flow_root_namespace *root_ns;
2418 struct mlx5_flow_namespace *ns;
2419
2420 fs_get_obj(ns, node);
2421 root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2422 mutex_destroy(&root_ns->chain_lock);
2423 kfree(node);
2424 }
2425
2426 static struct mlx5_flow_root_namespace
2427 *create_root_ns(struct mlx5_flow_steering *steering,
2428 enum fs_flow_table_type table_type)
2429 {
2430 const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2431 struct mlx5_flow_root_namespace *root_ns;
2432 struct mlx5_flow_namespace *ns;
2433
2434 if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2435 (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
2436 cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
2437
2438 /* Create the root namespace */
2439 root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2440 if (!root_ns)
2441 return NULL;
2442
2443 root_ns->dev = steering->dev;
2444 root_ns->table_type = table_type;
2445 root_ns->cmds = cmds;
2446
2447 INIT_LIST_HEAD(&root_ns->underlay_qpns);
2448
2449 ns = &root_ns->ns;
2450 fs_init_namespace(ns);
2451 mutex_init(&root_ns->chain_lock);
2452 tree_init_node(&ns->node, NULL, del_sw_root_ns);
2453 tree_add_node(&ns->node, NULL);
2454
2455 return root_ns;
2456 }
2457
2458 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2459
2460 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2461 {
2462 struct fs_prio *prio;
2463
2464 fs_for_each_prio(prio, ns) {
2465 /* This updates prio start_level and num_levels */
2466 set_prio_attrs_in_prio(prio, acc_level);
2467 acc_level += prio->num_levels;
2468 }
2469 return acc_level;
2470 }
2471
2472 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2473 {
2474 struct mlx5_flow_namespace *ns;
2475 int acc_level_ns = acc_level;
2476
2477 prio->start_level = acc_level;
2478 fs_for_each_ns(ns, prio) {
2479 /* This updates start_level and num_levels of ns's priority descendants */
2480 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2481
2482 		/* If this is a prio with chains, we can jump from one chain
2483 		 * (namespace) to another, so we accumulate the levels.
2484 */
2485 if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2486 acc_level = acc_level_ns;
2487 }
2488
2489 if (!prio->num_levels)
2490 prio->num_levels = acc_level_ns - prio->start_level;
2491 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2492 }
2493
2494 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2495 {
2496 struct mlx5_flow_namespace *ns = &root_ns->ns;
2497 struct fs_prio *prio;
2498 int start_level = 0;
2499
2500 fs_for_each_prio(prio, ns) {
2501 set_prio_attrs_in_prio(prio, start_level);
2502 start_level += prio->num_levels;
2503 }
2504 }
2505
2506 #define ANCHOR_PRIO 0
2507 #define ANCHOR_SIZE 1
2508 #define ANCHOR_LEVEL 0
2509 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2510 {
2511 struct mlx5_flow_namespace *ns = NULL;
2512 struct mlx5_flow_table_attr ft_attr = {};
2513 struct mlx5_flow_table *ft;
2514
2515 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2516 if (WARN_ON(!ns))
2517 return -EINVAL;
2518
2519 ft_attr.max_fte = ANCHOR_SIZE;
2520 ft_attr.level = ANCHOR_LEVEL;
2521 ft_attr.prio = ANCHOR_PRIO;
2522
2523 ft = mlx5_create_flow_table(ns, &ft_attr);
2524 if (IS_ERR(ft)) {
2525 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2526 return PTR_ERR(ft);
2527 }
2528 return 0;
2529 }
2530
2531 static int init_root_ns(struct mlx5_flow_steering *steering)
2532 {
2533 int err;
2534
2535 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2536 if (!steering->root_ns)
2537 return -ENOMEM;
2538
2539 err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2540 if (err)
2541 goto out_err;
2542
2543 set_prio_attrs(steering->root_ns);
2544 err = create_anchor_flow_table(steering);
2545 if (err)
2546 goto out_err;
2547
2548 return 0;
2549
2550 out_err:
2551 cleanup_root_ns(steering->root_ns);
2552 steering->root_ns = NULL;
2553 return err;
2554 }
2555
2556 static void clean_tree(struct fs_node *node)
2557 {
2558 if (node) {
2559 struct fs_node *iter;
2560 struct fs_node *temp;
2561
2562 tree_get_node(node);
2563 list_for_each_entry_safe(iter, temp, &node->children, list)
2564 clean_tree(iter);
2565 tree_put_node(node, false);
2566 tree_remove_node(node, false);
2567 }
2568 }
2569
2570 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2571 {
2572 if (!root_ns)
2573 return;
2574
2575 clean_tree(&root_ns->ns.node);
2576 }
2577
2578 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
2579 {
2580 struct mlx5_flow_steering *steering = dev->priv.steering;
2581
2582 cleanup_root_ns(steering->root_ns);
2583 cleanup_root_ns(steering->fdb_root_ns);
2584 steering->fdb_root_ns = NULL;
2585 kfree(steering->fdb_sub_ns);
2586 steering->fdb_sub_ns = NULL;
2587 cleanup_root_ns(steering->sniffer_rx_root_ns);
2588 cleanup_root_ns(steering->sniffer_tx_root_ns);
2589 cleanup_root_ns(steering->rdma_rx_root_ns);
2590 cleanup_root_ns(steering->rdma_tx_root_ns);
2591 cleanup_root_ns(steering->egress_root_ns);
2592 mlx5_cleanup_fc_stats(dev);
2593 kmem_cache_destroy(steering->ftes_cache);
2594 kmem_cache_destroy(steering->fgs_cache);
2595 kfree(steering);
2596 }
2597
2598 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2599 {
2600 struct fs_prio *prio;
2601
2602 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2603 if (!steering->sniffer_tx_root_ns)
2604 return -ENOMEM;
2605
2606 /* Create single prio */
2607 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2608 return PTR_ERR_OR_ZERO(prio);
2609 }
2610
2611 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2612 {
2613 struct fs_prio *prio;
2614
2615 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2616 if (!steering->sniffer_rx_root_ns)
2617 return -ENOMEM;
2618
2619 /* Create single prio */
2620 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2621 return PTR_ERR_OR_ZERO(prio);
2622 }
2623
2624 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2625 {
2626 int err;
2627
2628 steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2629 if (!steering->rdma_rx_root_ns)
2630 return -ENOMEM;
2631
2632 err = init_root_tree(steering, &rdma_rx_root_fs,
2633 &steering->rdma_rx_root_ns->ns.node);
2634 if (err)
2635 goto out_err;
2636
2637 set_prio_attrs(steering->rdma_rx_root_ns);
2638
2639 return 0;
2640
2641 out_err:
2642 cleanup_root_ns(steering->rdma_rx_root_ns);
2643 steering->rdma_rx_root_ns = NULL;
2644 return err;
2645 }
2646
2647 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2648 {
2649 int err;
2650
2651 steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2652 if (!steering->rdma_tx_root_ns)
2653 return -ENOMEM;
2654
2655 err = init_root_tree(steering, &rdma_tx_root_fs,
2656 &steering->rdma_tx_root_ns->ns.node);
2657 if (err)
2658 goto out_err;
2659
2660 set_prio_attrs(steering->rdma_tx_root_ns);
2661
2662 return 0;
2663
2664 out_err:
2665 cleanup_root_ns(steering->rdma_tx_root_ns);
2666 steering->rdma_tx_root_ns = NULL;
2667 return err;
2668 }
2669
2670 /* FT and tc chains are stored in the same array so we can re-use the
2671 * mlx5_get_fdb_sub_ns() and tc api for FT chains.
2672  * When creating a new ns for each chain, store it in the first available slot.
2673 * Assume tc chains are created and stored first and only then the FT chain.
2674 */
2675 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2676 struct mlx5_flow_namespace *ns)
2677 {
2678 int chain = 0;
2679
2680 while (steering->fdb_sub_ns[chain])
2681 ++chain;
2682
2683 steering->fdb_sub_ns[chain] = ns;
2684 }
2685
2686 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2687 struct fs_prio *maj_prio)
2688 {
2689 struct mlx5_flow_namespace *ns;
2690 struct fs_prio *min_prio;
2691 int prio;
2692
2693 ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2694 if (IS_ERR(ns))
2695 return PTR_ERR(ns);
2696
2697 for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2698 min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2699 if (IS_ERR(min_prio))
2700 return PTR_ERR(min_prio);
2701 }
2702
2703 store_fdb_sub_ns_prio_chain(steering, ns);
2704
2705 return 0;
2706 }
2707
2708 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2709 int fs_prio,
2710 int chains)
2711 {
2712 struct fs_prio *maj_prio;
2713 int levels;
2714 int chain;
2715 int err;
2716
2717 levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2718 maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2719 fs_prio,
2720 levels);
2721 if (IS_ERR(maj_prio))
2722 return PTR_ERR(maj_prio);
2723
2724 for (chain = 0; chain < chains; chain++) {
2725 err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2726 if (err)
2727 return err;
2728 }
2729
2730 return 0;
2731 }
2732
2733 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2734 {
2735 int err;
2736
2737 steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2738 sizeof(*steering->fdb_sub_ns),
2739 GFP_KERNEL);
2740 if (!steering->fdb_sub_ns)
2741 return -ENOMEM;
2742
2743 err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2744 if (err)
2745 return err;
2746
2747 err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2748 if (err)
2749 return err;
2750
2751 return 0;
2752 }
2753
2754 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2755 {
2756 struct fs_prio *maj_prio;
2757 int err;
2758
2759 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2760 if (!steering->fdb_root_ns)
2761 return -ENOMEM;
2762
2763 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
2764 1);
2765 if (IS_ERR(maj_prio)) {
2766 err = PTR_ERR(maj_prio);
2767 goto out_err;
2768 }
2769 err = create_fdb_fast_path(steering);
2770 if (err)
2771 goto out_err;
2772
2773 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2774 if (IS_ERR(maj_prio)) {
2775 err = PTR_ERR(maj_prio);
2776 goto out_err;
2777 }
2778
2779 /* We put this priority last, knowing that nothing will get here
2780 * unless explicitly forwarded to. This is possible because the
2781 	 * slow path tables have catch-all rules and nothing gets past
2782 * those tables.
2783 */
2784 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2785 if (IS_ERR(maj_prio)) {
2786 err = PTR_ERR(maj_prio);
2787 goto out_err;
2788 }
2789
2790 set_prio_attrs(steering->fdb_root_ns);
2791 return 0;
2792
2793 out_err:
2794 cleanup_root_ns(steering->fdb_root_ns);
2795 kfree(steering->fdb_sub_ns);
2796 steering->fdb_sub_ns = NULL;
2797 steering->fdb_root_ns = NULL;
2798 return err;
2799 }
2800
2801 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2802 {
2803 struct fs_prio *prio;
2804
2805 steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2806 if (!steering->esw_egress_root_ns[vport])
2807 return -ENOMEM;
2808
2809 	/* create 1 prio */
2810 prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
2811 return PTR_ERR_OR_ZERO(prio);
2812 }
2813
2814 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2815 {
2816 struct fs_prio *prio;
2817
2818 steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2819 if (!steering->esw_ingress_root_ns[vport])
2820 return -ENOMEM;
2821
2822 	/* create 1 prio */
2823 prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
2824 return PTR_ERR_OR_ZERO(prio);
2825 }
2826
2827 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
2828 {
2829 struct mlx5_flow_steering *steering = dev->priv.steering;
2830 int err;
2831 int i;
2832
2833 steering->esw_egress_root_ns =
2834 kcalloc(total_vports,
2835 sizeof(*steering->esw_egress_root_ns),
2836 GFP_KERNEL);
2837 if (!steering->esw_egress_root_ns)
2838 return -ENOMEM;
2839
2840 for (i = 0; i < total_vports; i++) {
2841 err = init_egress_acl_root_ns(steering, i);
2842 if (err)
2843 goto cleanup_root_ns;
2844 }
2845 steering->esw_egress_acl_vports = total_vports;
2846 return 0;
2847
2848 cleanup_root_ns:
2849 for (i--; i >= 0; i--)
2850 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2851 kfree(steering->esw_egress_root_ns);
2852 steering->esw_egress_root_ns = NULL;
2853 return err;
2854 }
2855
2856 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
2857 {
2858 struct mlx5_flow_steering *steering = dev->priv.steering;
2859 int i;
2860
2861 if (!steering->esw_egress_root_ns)
2862 return;
2863
2864 for (i = 0; i < steering->esw_egress_acl_vports; i++)
2865 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2866
2867 kfree(steering->esw_egress_root_ns);
2868 steering->esw_egress_root_ns = NULL;
2869 }
2870
2871 int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
2872 {
2873 struct mlx5_flow_steering *steering = dev->priv.steering;
2874 int err;
2875 int i;
2876
2877 steering->esw_ingress_root_ns =
2878 kcalloc(total_vports,
2879 sizeof(*steering->esw_ingress_root_ns),
2880 GFP_KERNEL);
2881 if (!steering->esw_ingress_root_ns)
2882 return -ENOMEM;
2883
2884 for (i = 0; i < total_vports; i++) {
2885 err = init_ingress_acl_root_ns(steering, i);
2886 if (err)
2887 goto cleanup_root_ns;
2888 }
2889 steering->esw_ingress_acl_vports = total_vports;
2890 return 0;
2891
2892 cleanup_root_ns:
2893 for (i--; i >= 0; i--)
2894 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2895 kfree(steering->esw_ingress_root_ns);
2896 steering->esw_ingress_root_ns = NULL;
2897 return err;
2898 }
2899
2900 void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
2901 {
2902 struct mlx5_flow_steering *steering = dev->priv.steering;
2903 int i;
2904
2905 if (!steering->esw_ingress_root_ns)
2906 return;
2907
2908 for (i = 0; i < steering->esw_ingress_acl_vports; i++)
2909 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2910
2911 kfree(steering->esw_ingress_root_ns);
2912 steering->esw_ingress_root_ns = NULL;
2913 }
2914
2915 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
2916 {
2917 int err;
2918
2919 steering->egress_root_ns = create_root_ns(steering,
2920 FS_FT_NIC_TX);
2921 if (!steering->egress_root_ns)
2922 return -ENOMEM;
2923
2924 err = init_root_tree(steering, &egress_root_fs,
2925 &steering->egress_root_ns->ns.node);
2926 if (err)
2927 goto cleanup;
2928 set_prio_attrs(steering->egress_root_ns);
2929 return 0;
2930 cleanup:
2931 cleanup_root_ns(steering->egress_root_ns);
2932 steering->egress_root_ns = NULL;
2933 return err;
2934 }
2935
2936 int mlx5_init_fs(struct mlx5_core_dev *dev)
2937 {
2938 struct mlx5_flow_steering *steering;
2939 int err = 0;
2940
2941 err = mlx5_init_fc_stats(dev);
2942 if (err)
2943 return err;
2944
2945 steering = kzalloc(sizeof(*steering), GFP_KERNEL);
2946 if (!steering)
2947 return -ENOMEM;
2948 steering->dev = dev;
2949 dev->priv.steering = steering;
2950
2951 steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
2952 sizeof(struct mlx5_flow_group), 0,
2953 0, NULL);
2954 steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
2955 0, NULL);
2956 if (!steering->ftes_cache || !steering->fgs_cache) {
2957 err = -ENOMEM;
2958 goto err;
2959 }
2960
2961 if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
2962 (MLX5_CAP_GEN(dev, nic_flow_table))) ||
2963 ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
2964 MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
2965 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
2966 err = init_root_ns(steering);
2967 if (err)
2968 goto err;
2969 }
2970
2971 if (MLX5_ESWITCH_MANAGER(dev)) {
2972 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
2973 err = init_fdb_root_ns(steering);
2974 if (err)
2975 goto err;
2976 }
2977 }
2978
2979 if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
2980 err = init_sniffer_rx_root_ns(steering);
2981 if (err)
2982 goto err;
2983 }
2984
2985 if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
2986 err = init_sniffer_tx_root_ns(steering);
2987 if (err)
2988 goto err;
2989 }
2990
2991 if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
2992 MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
2993 err = init_rdma_rx_root_ns(steering);
2994 if (err)
2995 goto err;
2996 }
2997
2998 if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
2999 err = init_rdma_tx_root_ns(steering);
3000 if (err)
3001 goto err;
3002 }
3003
3004 if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
3005 MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3006 err = init_egress_root_ns(steering);
3007 if (err)
3008 goto err;
3009 }
3010
3011 return 0;
3012 err:
3013 mlx5_cleanup_fs(dev);
3014 return err;
3015 }
3016
3017 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3018 {
3019 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3020 struct mlx5_ft_underlay_qp *new_uqp;
3021 int err = 0;
3022
3023 new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3024 if (!new_uqp)
3025 return -ENOMEM;
3026
3027 mutex_lock(&root->chain_lock);
3028
3029 if (!root->root_ft) {
3030 err = -EINVAL;
3031 goto update_ft_fail;
3032 }
3033
3034 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3035 false);
3036 if (err) {
3037 mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
3038 underlay_qpn, err);
3039 goto update_ft_fail;
3040 }
3041
3042 new_uqp->qpn = underlay_qpn;
3043 list_add_tail(&new_uqp->list, &root->underlay_qpns);
3044
3045 mutex_unlock(&root->chain_lock);
3046
3047 return 0;
3048
3049 update_ft_fail:
3050 mutex_unlock(&root->chain_lock);
3051 kfree(new_uqp);
3052 return err;
3053 }
3054 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
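/*
 * Usage sketch (illustrative): the add/remove calls are expected to be paired
 * by the caller, e.g. an IPoIB-style user registering the QP that underlies
 * its RX traffic.  "qp->qpn" is a placeholder for whatever QP number the
 * caller owns; it is not defined in this file.
 *
 *	err = mlx5_fs_add_rx_underlay_qpn(dev, qp->qpn);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_fs_remove_rx_underlay_qpn(dev, qp->qpn);
 */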
3055
3056 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3057 {
3058 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3059 struct mlx5_ft_underlay_qp *uqp;
3060 bool found = false;
3061 int err = 0;
3062
3063 mutex_lock(&root->chain_lock);
3064 list_for_each_entry(uqp, &root->underlay_qpns, list) {
3065 if (uqp->qpn == underlay_qpn) {
3066 found = true;
3067 break;
3068 }
3069 }
3070
3071 if (!found) {
3072 mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3073 underlay_qpn);
3074 err = -EINVAL;
3075 goto out;
3076 }
3077
3078 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3079 true);
3080 if (err)
3081 mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3082 underlay_qpn, err);
3083
3084 list_del(&uqp->list);
3085 mutex_unlock(&root->chain_lock);
3086 kfree(uqp);
3087
3088 return 0;
3089
3090 out:
3091 mutex_unlock(&root->chain_lock);
3092 return err;
3093 }
3094 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3095
3096 static struct mlx5_flow_root_namespace
3097 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3098 {
3099 struct mlx5_flow_namespace *ns;
3100
3101 if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3102 ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3103 ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3104 else
3105 ns = mlx5_get_flow_namespace(dev, ns_type);
3106 if (!ns)
3107 return NULL;
3108
3109 return find_root(&ns->node);
3110 }
3111
3112 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3113 u8 ns_type, u8 num_actions,
3114 void *modify_actions)
3115 {
3116 struct mlx5_flow_root_namespace *root;
3117 struct mlx5_modify_hdr *modify_hdr;
3118 int err;
3119
3120 root = get_root_namespace(dev, ns_type);
3121 if (!root)
3122 return ERR_PTR(-EOPNOTSUPP);
3123
3124 modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3125 if (!modify_hdr)
3126 return ERR_PTR(-ENOMEM);
3127
3128 modify_hdr->ns_type = ns_type;
3129 err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3130 modify_actions, modify_hdr);
3131 if (err) {
3132 kfree(modify_hdr);
3133 return ERR_PTR(err);
3134 }
3135
3136 return modify_hdr;
3137 }
3138 EXPORT_SYMBOL(mlx5_modify_header_alloc);
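/*
 * Usage sketch (illustrative): a caller builds an array of hardware
 * modify-header actions (the action encoding is defined by the device
 * interface and is not shown here), allocates the object, attaches it to a
 * flow_act owned by the caller, and later releases it.  "actions",
 * "num_actions" and "flow_act" are placeholders assumed to exist in the
 * caller.
 *
 *	struct mlx5_modify_hdr *mh;
 *
 *	mh = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
 *				      num_actions, actions);
 *	if (IS_ERR(mh))
 *		return PTR_ERR(mh);
 *	flow_act.modify_hdr = mh;
 *	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 *	...
 *	mlx5_modify_header_dealloc(dev, mh);
 */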
3139
3140 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3141 struct mlx5_modify_hdr *modify_hdr)
3142 {
3143 struct mlx5_flow_root_namespace *root;
3144
3145 root = get_root_namespace(dev, modify_hdr->ns_type);
3146 if (WARN_ON(!root))
3147 return;
3148 root->cmds->modify_header_dealloc(root, modify_hdr);
3149 kfree(modify_hdr);
3150 }
3151 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3152
3153 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3154 int reformat_type,
3155 size_t size,
3156 void *reformat_data,
3157 enum mlx5_flow_namespace_type ns_type)
3158 {
3159 struct mlx5_pkt_reformat *pkt_reformat;
3160 struct mlx5_flow_root_namespace *root;
3161 int err;
3162
3163 root = get_root_namespace(dev, ns_type);
3164 if (!root)
3165 return ERR_PTR(-EOPNOTSUPP);
3166
3167 pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3168 if (!pkt_reformat)
3169 return ERR_PTR(-ENOMEM);
3170
3171 pkt_reformat->ns_type = ns_type;
3172 pkt_reformat->reformat_type = reformat_type;
3173 err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
3174 reformat_data, ns_type,
3175 pkt_reformat);
3176 if (err) {
3177 kfree(pkt_reformat);
3178 return ERR_PTR(err);
3179 }
3180
3181 return pkt_reformat;
3182 }
3183 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3184
3185 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3186 struct mlx5_pkt_reformat *pkt_reformat)
3187 {
3188 struct mlx5_flow_root_namespace *root;
3189
3190 root = get_root_namespace(dev, pkt_reformat->ns_type);
3191 if (WARN_ON(!root))
3192 return;
3193 root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3194 kfree(pkt_reformat);
3195 }
3196 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
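/*
 * Usage sketch (illustrative): allocating an encap context from raw header
 * bytes and attaching it to a rule.  The reformat type, the "encap_hdr"
 * buffer/length and the "flow_act" variable are placeholders chosen for the
 * example, not values mandated by this file.
 *
 *	struct mlx5_pkt_reformat *pr;
 *
 *	pr = mlx5_packet_reformat_alloc(dev, MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
 *					encap_hdr_len, encap_hdr,
 *					MLX5_FLOW_NAMESPACE_FDB);
 *	if (IS_ERR(pr))
 *		return PTR_ERR(pr);
 *	flow_act.pkt_reformat = pr;
 *	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
 *	...
 *	mlx5_packet_reformat_dealloc(dev, pr);
 */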
3197
3198 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3199 struct mlx5_flow_root_namespace *peer_ns)
3200 {
3201 if (peer_ns && ns->mode != peer_ns->mode) {
3202 mlx5_core_err(ns->dev,
3203 "Can't peer namespace of different steering mode\n");
3204 return -EINVAL;
3205 }
3206
3207 return ns->cmds->set_peer(ns, peer_ns);
3208 }
3209
3210 /* This function should be called only at the init stage of the namespace.
3211  * It is not safe to call this function while steering operations
3212  * are being executed in the namespace.
3213 */
3214 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3215 enum mlx5_flow_steering_mode mode)
3216 {
3217 struct mlx5_flow_root_namespace *root;
3218 const struct mlx5_flow_cmds *cmds;
3219 int err;
3220
3221 root = find_root(&ns->node);
3222 if (&root->ns != ns)
3223 		/* Can't set cmds on a non-root namespace */
3224 return -EINVAL;
3225
3226 if (root->table_type != FS_FT_FDB)
3227 return -EOPNOTSUPP;
3228
3229 if (root->mode == mode)
3230 return 0;
3231
3232 if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3233 cmds = mlx5_fs_cmd_get_dr_cmds();
3234 else
3235 cmds = mlx5_fs_cmd_get_fw_cmds();
3236 if (!cmds)
3237 return -EOPNOTSUPP;
3238
3239 err = cmds->create_ns(root);
3240 if (err) {
3241 mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3242 err);
3243 return err;
3244 }
3245
3246 root->cmds->destroy_ns(root);
3247 root->cmds = cmds;
3248 root->mode = mode;
3249
3250 return 0;
3251 }
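/*
 * Usage sketch (illustrative): switching the FDB root namespace to
 * software-managed steering before any tables are created in it, as the
 * comment above requires.  The caller is assumed to hold exclusive ownership
 * of the namespace at this point; "dev" is the caller's mlx5 core device.
 *
 *	struct mlx5_flow_namespace *ns;
 *	int err;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	err = mlx5_flow_namespace_set_mode(ns, MLX5_FLOW_STEERING_MODE_SMFS);
 */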
3252