/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "eswitch.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
	.def_miss_action = def_miss_act,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS                                                \
	FS_REQUIRED_CAPS(                                                      \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
		FS_CAP(flow_table_properties_nic_transmit                      \
			       .identified_miss_table_mode),                   \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 5
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 1
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)

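/*
 * Note how the *_MIN_LEVEL constants above build on one another: each
 * steering block reserves its levels on top of the blocks defined before
 * it (kernel -> ethtool -> bypass -> anchor -> offloads -> lag), so the
 * min_ft_level of each prio in the init tree below always points past
 * every level the preceding blocks may occupy.
 */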
struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
	enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]){
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
						  OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
						  KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
						  LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
						  ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 1,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

#define RDMA_RX_BYPASS_PRIO 0
#define RDMA_RX_KERNEL_PRIO 1
static struct init_tree_node rdma_rx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 2,
	.children = (struct init_tree_node[]) {
		[RDMA_RX_BYPASS_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		[RDMA_RX_KERNEL_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
				ADD_MULTIPLE_PRIO(1, 1))),
	}
};

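/*
 * Lock-ordering annotation for the nested rwsems below: flow table nodes
 * are taken with the GRANDPARENT class, flow groups with PARENT and FTEs
 * with CHILD, so lockdep can tell the three levels of the steering tree
 * apart when they are held at the same time.
 */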
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = sizeof_field(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = sizeof_field(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,

};

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case that requires keeping
 * the FTE locked for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}

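/*
 * Dropping the last reference tears a node down in two steps: the HW
 * object is destroyed first (del_hw_func), then the SW representation is
 * unlinked and freed (del_sw_func) while the parent is write-locked. The
 * parent's own reference is only put afterwards, so teardown cascades
 * bottom-up through the tree.
 */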
static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			/* Only the root namespace doesn't have a parent and
			 * we just need to free its node.
			 */
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
			if (node->del_sw_func)
				node->del_sw_func(node);
			up_write_ref_node(parent_node, locked);
		} else {
			kfree(node);
		}
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

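/*
 * del_sw_hw_rule() does not talk to FW directly; it only records which
 * FTE fields changed in fte->modify_mask. The caller (mlx5_del_flow_rules)
 * flushes the accumulated mask with modify_fte() once all of the handle's
 * rules have been removed.
 */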
static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER  &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = 0;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active &&
	    fg->max_ftes == ft->autogroup.group_size &&
	    fg->start_index < ft->autogroup.max_fte)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

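/*
 * FTE indices are allocated from a per-group IDA relative to the group's
 * start_index, so an FTE's absolute index in the table is always
 * fg->start_index plus the IDA slot obtained here.
 */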
static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, NULL, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start (closest from right), else we search for the
 * last flow table in the root sub-tree till start (closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

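/*
 * The root flow table is the one packets hit first, so it must always be
 * the lowest-level table in the namespace. When a table with a lower
 * level than the current root is created, the root is re-pointed at it;
 * with underlay QPNs present, the root must be updated once per QPN.
 */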
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Modify/set FWD rules that point to old_next_ft to point to new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

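/*
 * Common create path for all flow table flavors: resolve the prio, map
 * the caller's level into the prio's level range (unless the table is
 * UNMANAGED), create the table in FW and then, under the chain lock,
 * splice it into the table chain and possibly make it the new root.
 */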
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
	struct mlx5_flow_table *next_ft;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (!unmanaged) {
		/* The level is related to the
		 * priority level range.
		 */
		if (ft_attr->level >= fs_prio->num_levels) {
			err = -ENOSPC;
			goto unlock_root;
		}

		ft_attr->level += fs_prio->start_level;
	}

	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = unmanaged ? ft_attr->next_ft :
			      find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
	if (err)
		goto free_ft;

	if (!unmanaged) {
		err = connect_flow_table(root->dev, ft, fs_prio);
		if (err)
			goto destroy_ft;
	}

	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
	} else {
		ft->node.root = fs_prio->node.root;
	}
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level   = level;
	ft_attr.prio    = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr)
{
	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
	int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
	int max_num_groups = ft_attr->autogroup.max_num_groups;
	struct mlx5_flow_table *ft;

	if (max_num_groups > autogroups_max_fte)
		return ERR_PTR(-EINVAL);
	if (num_reserved_entries > ft_attr->max_fte)
		return ERR_PTR(-EINVAL);

	ft = mlx5_create_flow_table(ns, ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;
	ft->autogroup.max_fte = autogroups_max_fte;
	/* We save place for flow groups in addition to max types */
	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

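/*
 * Manually created groups may only use the reserved entries above the
 * autogroup region (start_index >= ft->autogroup.max_fte) when
 * autogrouping is active on the table; anything below that range belongs
 * to the auto-allocated groups.
 */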
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

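/*
 * Build a handle covering one rule per destination. An existing rule
 * with an identical destination is reused by bumping its refcount; only
 * genuinely new rules set *new_rule and contribute counter or
 * destination-list bits to *modify_mask.
 */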
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list - we need flow tables to be at the
		 * end of the list for forward to next prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fte->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

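/*
 * Autogroups are laid out sorted by start_index; scan for the first gap
 * large enough for group_size FTEs. Once required_groups groups exist,
 * new groups shrink to a single entry so the table can still absorb
 * rules with new match criteria.
 */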
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	u32 max_fte = ft->autogroup.max_fte;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;
	struct mlx5_flow_group *fg;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/* max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.pkt_reformat->id ==
		       d2->vport.pkt_reformat->id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

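/*
 * Attach (or merge) a rule into an existing FTE of the group: the FTE's
 * action set is OR'ed with the new actions after the conflict checks
 * above, and only rules that were really created (refcount == 1) are
 * linked into the tree.
 */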
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_table *ft)
{
	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
	u32 action = flow_act->action;

	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (ignore_level) {
		if (ft->type != FS_FT_FDB)
			return false;

		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		    dest->ft->type != FS_FT_FDB)
			return false;
	}

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level && !ignore_level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

struct match_list_head {
	struct list_head  list;
	struct match_list first;
};

static void free_match_list(struct match_list_head *head, bool ft_locked)
{
	if (!list_empty(&head->list)) {
		struct match_list *iter, *match_tmp;

		list_del(&head->first.list);
		tree_put_node(&head->first.g->node, ft_locked);
		list_for_each_entry_safe(iter, match_tmp, &head->list,
					 list) {
			tree_put_node(&iter->g->node, ft_locked);
			list_del(&iter->list);
			kfree(iter);
		}
	}
}

static int build_match_list(struct match_list_head *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec,
			    bool ft_locked)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which have a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (likely(list_empty(&match_head->list))) {
			if (!tree_get_node(&g->node))
				continue;
			match_head->first.g = g;
			list_add_tail(&match_head->first.list,
				      &match_head->list);
			continue;
		}

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head, ft_locked);
			err = -ENOMEM;
			goto out;
		}
		if (!tree_get_node(&g->node)) {
			kfree(curr_match);
			continue;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}

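/*
 * The sum of the matched groups' version counters acts as an optimistic
 * concurrency token: if it changes between the unlocked search and the
 * locked insert, another thread may have added a matching FTE and the
 * search must be retried under the write lock.
 */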
static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}

static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}

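/*
 * Two-phase insert into the matched groups: first try to piggy-back on an
 * FTE with an identical match value in any matched group; failing that,
 * allocate a fresh FTE slot in one of them. -EAGAIN tells the caller the
 * table changed underneath us and the whole lookup must be redone.
 */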
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       const struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64  version;
	int err;

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	version = matched_fgs_get_version(match_head);
	if (flow_act->flags & FLOW_ACT_NO_APPEND)
		goto skip_search;
	/* Try to find a fg that already contains a matching fte */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
		up_write_ref_node(&fte_tmp->node, false);
		tree_put_node(&fte_tmp->node, false);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

skip_search:
	/* No group with matching fte found, or we skipped the search.
	 * Try to add a new fte to any matching fg.
	 */

	/* Check the ft version, in case a new flow group
	 * was added while the fgs weren't locked
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version, in case a new FTE with the
	 * same values was added while the fgs weren't locked
	 */
	if (version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		if (!g->node.active)
			continue;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node, false);
			if (err == -ENOSPC)
				continue;
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node, false);
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node, false);
		tree_put_node(&fte->node, false);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}

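/*
 * Core add-rule path: validate the spec and destinations, collect the
 * candidate groups under a read lock, and try the fast path above. Only
 * when that fails (or the table version moved) is the table write-locked
 * so a new autogroup and FTE can be created.
 */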
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     const struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)

{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list_head match_head;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which have a matching match_criteria */
	err = build_match_list(&match_head, ft, spec, take_write);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head, take_write);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node, false);
		return rule;
	}

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte)) {
		up_write_ref_node(&ft->node, false);
		err = PTR_ERR(fte);
		goto err_alloc_fte;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node, false);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	err = insert_fte(g, fte);
	if (err)
		goto err_release_fg;

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node, false);
	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
	up_write_ref_node(&fte->node, false);
	tree_put_node(&fte->node, false);
	tree_put_node(&g->node, false);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node, false);
	kmem_cache_free(steering->ftes_cache, fte);
err_alloc_fte:
	tree_put_node(&g->node, false);
	return ERR_PTR(err);
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest = {};
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		if (num_dest)
			return ERR_PTR(-EINVAL);
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			num_dest = 1;
			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!IS_ERR_OR_NULL(handle) &&
		    (list_empty(&handle->rule[0]->next_ft))) {
			mutex_lock(&next_ft->lock);
			list_add(&handle->rule[0]->next_ft,
				 &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);


void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;

	/* In order to consolidate the HW changes we lock the FTE against
	 * other changes and increase its refcount so that its "del"
	 * functions are not invoked from the tree; we handle them here.
	 * The rules are removed under the locked FTE.
	 * After removing all of the handle's rules: if rules remain, we
	 * only need to modify the FTE in FW and unlock/drop the refcount
	 * we took above.  Otherwise the FTE should be deleted: first
	 * delete it in FW, then unlock it and proceed with tree_put_node
	 * of the FTE, which handles the last refcount decrease as well
	 * as the required handling of its parent.
	 */
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (fte->modify_mask && fte->dests_size) {
		modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else {
		del_hw_fte(&fte->node);
		up_write(&fte->node.lock);
		tree_put_node(&fte->node, false);
	}
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assuming prio->node.children (flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}
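
/*
 * Note that update_root_ft_destroy() deliberately returns 0 even if
 * re-pointing the root in FW failed: table teardown must not be
 * aborted at this point, so the failure is only logged above.
 */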

/* Disconnect the flow table from the chain: re-point the flow tables
 * of the previous priority (and any forward rules) at the next
 * flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
		err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node, false))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node, false))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}

struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
						int n)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || !steering->fdb_sub_ns)
		return NULL;

	return steering->fdb_sub_ns[n];
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	default:
		break;
	}

	if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
		root_ns = steering->egress_root_ns;
	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_BYPASS_PRIO;
	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_KERNEL_PRIO;
	} else { /* Must be NIC RX */
		root_ns = steering->root_ns;
		prio = type;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
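
/*
 * Example (illustrative only; hypothetical caller, error handling
 * elided): resolving the NIC RX bypass namespace and carving a small
 * flow table out of its first priority:
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_BYPASS);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	ft_attr.prio = 0;
 *	ft_attr.max_fte = 2;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 */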

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}

static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
				       unsigned int prio,
				       int num_levels,
				       enum fs_node_type type)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = type;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
					      unsigned int prio,
					      int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
							int def_miss_act)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	ns->def_miss_action = def_miss_act;
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
			offset / 32)) >> \
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}
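
/*
 * This is what gates the static init trees: e.g. a prio declared with
 * FS_CHAINING_CAPS is instantiated only if every listed
 * flow_table_properties_nic_receive.* bit is set.  A prio whose caps
 * are missing (or whose min_ft_level exceeds the device's
 * max_ft_level) is silently skipped by init_root_tree_recursive()
 * below.
 */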

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int i;
	struct mlx5_flow_namespace *fs_ns;
	int err;

	fs_get_obj(fs_ns, fs_parent_node);
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       &fs_ns->node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

	/* Create the root namespace */
	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, NULL);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}
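
/*
 * Example of the accumulation done below: consider a prio holding two
 * namespaces whose own prios span 2 levels each.  For a plain
 * FS_TYPE_PRIO the namespaces are alternatives, so both start at
 * prio->start_level and the prio spans 2 levels.  For
 * FS_TYPE_PRIO_CHAINS a packet may jump from one namespace (chain) to
 * the next, so the second namespace starts 2 levels higher and the
 * prio spans 4.
 */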
static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio) {
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

		/* If this is a prio with chains, a packet can jump from one
		 * chain (namespace) to another, so we accumulate the levels
		 */
		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
			acc_level = acc_level_ns;
	}

	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level = ANCHOR_LEVEL;
	ft_attr.prio = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
}
static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_egress_acls_root_ns(dev);
	cleanup_ingress_acls_root_ns(dev);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_rx_root_fs,
			     &steering->rdma_rx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_rx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_rx_root_ns);
	steering->rdma_rx_root_ns = NULL;
	return err;
}

/* FT and tc chains are stored in the same array so we can reuse
 * mlx5_get_fdb_sub_ns() and the tc API for FT chains.
 * When creating a new ns for each chain, store it in the first available
 * slot.  tc chains are assumed to be created and stored first, and only
 * then the FT chain.
 */
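/*
 * Resulting layout of the array (illustrative; the sizes come from
 * FDB_TC_MAX_CHAIN):
 *	fdb_sub_ns[0 .. FDB_TC_MAX_CHAIN]	tc chains (FDB_TC_OFFLOAD)
 *	fdb_sub_ns[FDB_TC_MAX_CHAIN + 1]	the FT chain (FDB_FT_OFFLOAD)
 */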
static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct mlx5_flow_namespace *ns)
{
	int chain = 0;

	while (steering->fdb_sub_ns[chain])
		++chain;

	steering->fdb_sub_ns[chain] = ns;
}

static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct fs_prio *maj_prio)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *min_prio;
	int prio;

	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
		if (IS_ERR(min_prio))
			return PTR_ERR(min_prio);
	}

	store_fdb_sub_ns_prio_chain(steering, ns);

	return 0;
}

static int create_fdb_chains(struct mlx5_flow_steering *steering,
			     int fs_prio,
			     int chains)
{
	struct fs_prio *maj_prio;
	int levels;
	int chain;
	int err;

	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
					  fs_prio,
					  levels);
	if (IS_ERR(maj_prio))
		return PTR_ERR(maj_prio);

	for (chain = 0; chain < chains; chain++) {
		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
		if (err)
			return err;
	}

	return 0;
}

static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
{
	int err;

	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
				       sizeof(*steering->fdb_sub_ns),
				       GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
	if (err)
		return err;

	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
	if (err)
		return err;

	return 0;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *maj_prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
				  1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}
	err = create_fdb_fast_path(steering);
	if (err)
		goto out_err;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	steering->fdb_root_ns = NULL;
	return err;
}

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int total_vports = mlx5_eswitch_get_total_vports(dev);
	int err;
	int i;

	steering->esw_egress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_egress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
	return err;
}

static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int total_vports = mlx5_eswitch_get_total_vports(dev);
	int err;
	int i;

	steering->esw_ingress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_ingress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
	return err;
}

static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
	if (err)
		goto cleanup;
	set_prio_attrs(steering->egress_root_ns);
	return 0;

cleanup:
	cleanup_root_ns(steering->egress_root_ns);
	steering->egress_root_ns = NULL;
	return err;
}

int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}
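
	/*
	 * Everything below is capability-gated: each root namespace is
	 * created only when the corresponding ft_support bit is set, and
	 * mlx5_get_flow_namespace() simply returns NULL for a namespace
	 * that was never created.
	 */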
	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acls_root_ns(dev);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acls_root_ns(dev);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
		err = init_rdma_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
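
/*
 * The two calls above are expected to be used in pairs by the underlay
 * owner (e.g. IPoIB): a QPN is added when its underlay QP is brought up
 * and removed before that QP is destroyed, keeping root->underlay_qpns
 * in sync with the QPNs programmed into the root flow table.
 */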

static struct mlx5_flow_root_namespace
*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_flow_namespace *ns;

	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
	else
		ns = mlx5_get_flow_namespace(dev, ns_type);
	if (!ns)
		return NULL;

	return find_root(&ns->node);
}

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 u8 ns_type, u8 num_actions,
						 void *modify_actions)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
	if (!modify_hdr)
		return ERR_PTR(-ENOMEM);

	modify_hdr->ns_type = ns_type;
	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
					      modify_actions, modify_hdr);
	if (err) {
		kfree(modify_hdr);
		return ERR_PTR(err);
	}

	return modify_hdr;
}
EXPORT_SYMBOL(mlx5_modify_header_alloc);

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, modify_hdr->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->modify_header_dealloc(root, modify_hdr);
	kfree(modify_hdr);
}
EXPORT_SYMBOL(mlx5_modify_header_dealloc);

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						     int reformat_type,
						     size_t size,
						     void *reformat_data,
						     enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_flow_root_namespace *root;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
	if (!pkt_reformat)
		return ERR_PTR(-ENOMEM);

	pkt_reformat->ns_type = ns_type;
	pkt_reformat->reformat_type = reformat_type;
	err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
						reformat_data, ns_type,
						pkt_reformat);
	if (err) {
		kfree(pkt_reformat);
		return ERR_PTR(err);
	}

	return pkt_reformat;
}
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);

void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, pkt_reformat->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
	kfree(pkt_reformat);
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);

int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns)
{
	if (peer_ns && ns->mode != peer_ns->mode) {
		mlx5_core_err(ns->dev,
			      "Can't peer namespaces of different steering modes\n");
		return -EINVAL;
	}

	return ns->cmds->set_peer(ns, peer_ns);
}
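
/*
 * mlx5_flow_namespace_set_peer() is meant for setups where two devices'
 * eswitches are bonded/merged: it lets the steering implementation of
 * one root namespace resolve objects owned by its peer, which is why
 * both sides must run the same steering mode (checked above).
 */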

/* This function should be called only at the init stage of the namespace.
 * It is not safe to call this function while steering operations
 * are being executed in the namespace.
 */
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode)
{
	struct mlx5_flow_root_namespace *root;
	const struct mlx5_flow_cmds *cmds;
	int err;

	root = find_root(&ns->node);
	if (&root->ns != ns)
		/* Can't set cmds to a non-root namespace */
		return -EINVAL;

	if (root->table_type != FS_FT_FDB)
		return -EOPNOTSUPP;

	if (root->mode == mode)
		return 0;

	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
		cmds = mlx5_fs_cmd_get_dr_cmds();
	else
		cmds = mlx5_fs_cmd_get_fw_cmds();
	if (!cmds)
		return -EOPNOTSUPP;

	err = cmds->create_ns(root);
	if (err) {
		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
			      err);
		return err;
	}

	root->cmds->destroy_ns(root);
	root->cmds = cmds;
	root->mode = mode;

	return 0;
}
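
/*
 * Example (illustrative only; hypothetical caller, error handling
 * elided): switching the FDB namespace to SW-managed steering before
 * any of its flow tables exist:
 *
 *	struct mlx5_flow_namespace *ns;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 *	if (ns)
 *		err = mlx5_flow_namespace_set_mode(ns,
 *						   MLX5_FLOW_STEERING_MODE_SMFS);
 */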